// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT.

package ssa

import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"

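// rewriteValueAMD64 dispatches on v.Op and applies the matching AMD64
// rewrite rules to v, reporting whether v was changed. Generic ops that
// lower one-for-one to a machine op simply retarget v.Op in place.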
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap16:
		return rewriteValueAMD64_OpBswap16(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMax32F:
		return rewriteValueAMD64_OpMax32F(v)
	case OpMax64F:
		return rewriteValueAMD64_OpMax64F(v)
	case OpMin32F:
		return rewriteValueAMD64_OpMin32F(v)
	case OpMin64F:
		return rewriteValueAMD64_OpMin64F(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst <t> [c]))
	// cond: is32Bit(c) && !t.IsPtr()
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			t := v_1.Type
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c) && !t.IsPtr()) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL <t> x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRL x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ <t> x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRQ x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPL (BSWAPL p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVLload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPQ (BSWAPQ p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [0] s:(SHRXL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRXL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
3874 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
3875 v_2 := v.Args[2]
3876 v_1 := v.Args[1]
3877 v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
3880 for {
3881 x := v_0
3882 y := v_1
3883 if v_2.Op != OpAMD64InvertFlags {
3884 break
3885 }
3886 cond := v_2.Args[0]
3887 v.reset(OpAMD64CMOVLLS)
3888 v.AddArg3(x, y, cond)
3889 return true
3890 }
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
3893 for {
3894 x := v_1
3895 if v_2.Op != OpAMD64FlagEQ {
3896 break
3897 }
3898 v.copyOf(x)
3899 return true
3900 }
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
3903 for {
3904 x := v_1
3905 if v_2.Op != OpAMD64FlagGT_UGT {
3906 break
3907 }
3908 v.copyOf(x)
3909 return true
3910 }
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
3913 for {
3914 y := v_0
3915 if v_2.Op != OpAMD64FlagGT_ULT {
3916 break
3917 }
3918 v.copyOf(y)
3919 return true
3920 }
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
3923 for {
3924 y := v_0
3925 if v_2.Op != OpAMD64FlagLT_ULT {
3926 break
3927 }
3928 v.copyOf(y)
3929 return true
3930 }
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
3933 for {
3934 x := v_1
3935 if v_2.Op != OpAMD64FlagLT_UGT {
3936 break
3937 }
3938 v.copyOf(x)
3939 return true
3940 }
3941 return false
3942 }
3943 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
3944 v_2 := v.Args[2]
3945 v_1 := v.Args[1]
3946 v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
3949 for {
3950 x := v_0
3951 y := v_1
3952 if v_2.Op != OpAMD64InvertFlags {
3953 break
3954 }
3955 cond := v_2.Args[0]
3956 v.reset(OpAMD64CMOVLHI)
3957 v.AddArg3(x, y, cond)
3958 return true
3959 }
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
3962 for {
3963 y := v_0
3964 if v_2.Op != OpAMD64FlagEQ {
3965 break
3966 }
3967 v.copyOf(y)
3968 return true
3969 }
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
3972 for {
3973 y := v_0
3974 if v_2.Op != OpAMD64FlagGT_UGT {
3975 break
3976 }
3977 v.copyOf(y)
3978 return true
3979 }
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
3982 for {
3983 x := v_1
3984 if v_2.Op != OpAMD64FlagGT_ULT {
3985 break
3986 }
3987 v.copyOf(x)
3988 return true
3989 }
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
3992 for {
3993 x := v_1
3994 if v_2.Op != OpAMD64FlagLT_ULT {
3995 break
3996 }
3997 v.copyOf(x)
3998 return true
3999 }
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
4002 for {
4003 y := v_0
4004 if v_2.Op != OpAMD64FlagLT_UGT {
4005 break
4006 }
4007 v.copyOf(y)
4008 return true
4009 }
4010 return false
4011 }
4012 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4013 v_2 := v.Args[2]
4014 v_1 := v.Args[1]
4015 v_0 := v.Args[0]
4016 b := v.Block
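	// b is the block containing v; the TEST/BLSR rewrites below use it to build new Select1 flag values.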
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
4019 for {
4020 x := v_0
4021 y := v_1
4022 if v_2.Op != OpAMD64InvertFlags {
4023 break
4024 }
4025 cond := v_2.Args[0]
4026 v.reset(OpAMD64CMOVLEQ)
4027 v.AddArg3(x, y, cond)
4028 return true
4029 }
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
4032 for {
4033 x := v_1
4034 if v_2.Op != OpAMD64FlagEQ {
4035 break
4036 }
4037 v.copyOf(x)
4038 return true
4039 }
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
4042 for {
4043 y := v_0
4044 if v_2.Op != OpAMD64FlagGT_UGT {
4045 break
4046 }
4047 v.copyOf(y)
4048 return true
4049 }
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
4052 for {
4053 y := v_0
4054 if v_2.Op != OpAMD64FlagGT_ULT {
4055 break
4056 }
4057 v.copyOf(y)
4058 return true
4059 }
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
4062 for {
4063 y := v_0
4064 if v_2.Op != OpAMD64FlagLT_ULT {
4065 break
4066 }
4067 v.copyOf(y)
4068 return true
4069 }
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
4072 for {
4073 y := v_0
4074 if v_2.Op != OpAMD64FlagLT_UGT {
4075 break
4076 }
4077 v.copyOf(y)
4078 return true
4079 }
	// match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
4082 for {
4083 x := v_0
4084 y := v_1
4085 if v_2.Op != OpAMD64TESTQ {
4086 break
4087 }
4088 _ = v_2.Args[1]
4089 v_2_0 := v_2.Args[0]
4090 v_2_1 := v_2.Args[1]
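		// TESTQ is commutative, so try the BLSRQ pattern with its operands in both orders.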
4091 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4092 s := v_2_0
4093 if s.Op != OpSelect0 {
4094 continue
4095 }
4096 blsr := s.Args[0]
4097 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4098 continue
4099 }
4100 v.reset(OpAMD64CMOVLEQ)
4101 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4102 v0.AddArg(blsr)
4103 v.AddArg3(x, y, v0)
4104 return true
4105 }
4106 break
4107 }
	// match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
4110 for {
4111 x := v_0
4112 y := v_1
4113 if v_2.Op != OpAMD64TESTL {
4114 break
4115 }
4116 _ = v_2.Args[1]
4117 v_2_0 := v_2.Args[0]
4118 v_2_1 := v_2.Args[1]
4119 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4120 s := v_2_0
4121 if s.Op != OpSelect0 {
4122 continue
4123 }
4124 blsr := s.Args[0]
4125 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4126 continue
4127 }
4128 v.reset(OpAMD64CMOVLEQ)
4129 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4130 v0.AddArg(blsr)
4131 v.AddArg3(x, y, v0)
4132 return true
4133 }
4134 break
4135 }
4136 return false
4137 }
4138 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4139 v_2 := v.Args[2]
4140 v_1 := v.Args[1]
4141 v_0 := v.Args[0]
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
4144 for {
4145 x := v_0
4146 y := v_1
4147 if v_2.Op != OpAMD64InvertFlags {
4148 break
4149 }
4150 cond := v_2.Args[0]
4151 v.reset(OpAMD64CMOVLLE)
4152 v.AddArg3(x, y, cond)
4153 return true
4154 }
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
4157 for {
4158 x := v_1
4159 if v_2.Op != OpAMD64FlagEQ {
4160 break
4161 }
4162 v.copyOf(x)
4163 return true
4164 }
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
4167 for {
4168 x := v_1
4169 if v_2.Op != OpAMD64FlagGT_UGT {
4170 break
4171 }
4172 v.copyOf(x)
4173 return true
4174 }
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
4177 for {
4178 x := v_1
4179 if v_2.Op != OpAMD64FlagGT_ULT {
4180 break
4181 }
4182 v.copyOf(x)
4183 return true
4184 }
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
4187 for {
4188 y := v_0
4189 if v_2.Op != OpAMD64FlagLT_ULT {
4190 break
4191 }
4192 v.copyOf(y)
4193 return true
4194 }
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
4197 for {
4198 y := v_0
4199 if v_2.Op != OpAMD64FlagLT_UGT {
4200 break
4201 }
4202 v.copyOf(y)
4203 return true
4204 }
4205 return false
4206 }
4207 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4208 v_2 := v.Args[2]
4209 v_1 := v.Args[1]
4210 v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
4213 for {
4214 x := v_0
4215 y := v_1
4216 if v_2.Op != OpAMD64InvertFlags {
4217 break
4218 }
4219 cond := v_2.Args[0]
4220 v.reset(OpAMD64CMOVLLT)
4221 v.AddArg3(x, y, cond)
4222 return true
4223 }
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
4226 for {
4227 y := v_0
4228 if v_2.Op != OpAMD64FlagEQ {
4229 break
4230 }
4231 v.copyOf(y)
4232 return true
4233 }
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
4236 for {
4237 x := v_1
4238 if v_2.Op != OpAMD64FlagGT_UGT {
4239 break
4240 }
4241 v.copyOf(x)
4242 return true
4243 }
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
4246 for {
4247 x := v_1
4248 if v_2.Op != OpAMD64FlagGT_ULT {
4249 break
4250 }
4251 v.copyOf(x)
4252 return true
4253 }
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
4256 for {
4257 y := v_0
4258 if v_2.Op != OpAMD64FlagLT_ULT {
4259 break
4260 }
4261 v.copyOf(y)
4262 return true
4263 }
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
4266 for {
4267 y := v_0
4268 if v_2.Op != OpAMD64FlagLT_UGT {
4269 break
4270 }
4271 v.copyOf(y)
4272 return true
4273 }
4274 return false
4275 }
4276 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4277 v_2 := v.Args[2]
4278 v_1 := v.Args[1]
4279 v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
4282 for {
4283 x := v_0
4284 y := v_1
4285 if v_2.Op != OpAMD64InvertFlags {
4286 break
4287 }
4288 cond := v_2.Args[0]
4289 v.reset(OpAMD64CMOVLCS)
4290 v.AddArg3(x, y, cond)
4291 return true
4292 }
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
4295 for {
4296 y := v_0
4297 if v_2.Op != OpAMD64FlagEQ {
4298 break
4299 }
4300 v.copyOf(y)
4301 return true
4302 }
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
4305 for {
4306 x := v_1
4307 if v_2.Op != OpAMD64FlagGT_UGT {
4308 break
4309 }
4310 v.copyOf(x)
4311 return true
4312 }
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
4315 for {
4316 y := v_0
4317 if v_2.Op != OpAMD64FlagGT_ULT {
4318 break
4319 }
4320 v.copyOf(y)
4321 return true
4322 }
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
4325 for {
4326 y := v_0
4327 if v_2.Op != OpAMD64FlagLT_ULT {
4328 break
4329 }
4330 v.copyOf(y)
4331 return true
4332 }
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
4335 for {
4336 x := v_1
4337 if v_2.Op != OpAMD64FlagLT_UGT {
4338 break
4339 }
4340 v.copyOf(x)
4341 return true
4342 }
4343 return false
4344 }
4345 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
4346 v_2 := v.Args[2]
4347 v_1 := v.Args[1]
4348 v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
4351 for {
4352 x := v_0
4353 y := v_1
4354 if v_2.Op != OpAMD64InvertFlags {
4355 break
4356 }
4357 cond := v_2.Args[0]
4358 v.reset(OpAMD64CMOVLGE)
4359 v.AddArg3(x, y, cond)
4360 return true
4361 }
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
4364 for {
4365 x := v_1
4366 if v_2.Op != OpAMD64FlagEQ {
4367 break
4368 }
4369 v.copyOf(x)
4370 return true
4371 }
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
4374 for {
4375 y := v_0
4376 if v_2.Op != OpAMD64FlagGT_UGT {
4377 break
4378 }
4379 v.copyOf(y)
4380 return true
4381 }
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
4384 for {
4385 y := v_0
4386 if v_2.Op != OpAMD64FlagGT_ULT {
4387 break
4388 }
4389 v.copyOf(y)
4390 return true
4391 }
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
4394 for {
4395 x := v_1
4396 if v_2.Op != OpAMD64FlagLT_ULT {
4397 break
4398 }
4399 v.copyOf(x)
4400 return true
4401 }
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
4404 for {
4405 x := v_1
4406 if v_2.Op != OpAMD64FlagLT_UGT {
4407 break
4408 }
4409 v.copyOf(x)
4410 return true
4411 }
4412 return false
4413 }
4414 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
4415 v_2 := v.Args[2]
4416 v_1 := v.Args[1]
4417 v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
4420 for {
4421 x := v_0
4422 y := v_1
4423 if v_2.Op != OpAMD64InvertFlags {
4424 break
4425 }
4426 cond := v_2.Args[0]
4427 v.reset(OpAMD64CMOVLCC)
4428 v.AddArg3(x, y, cond)
4429 return true
4430 }
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
4433 for {
4434 x := v_1
4435 if v_2.Op != OpAMD64FlagEQ {
4436 break
4437 }
4438 v.copyOf(x)
4439 return true
4440 }
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
4443 for {
4444 y := v_0
4445 if v_2.Op != OpAMD64FlagGT_UGT {
4446 break
4447 }
4448 v.copyOf(y)
4449 return true
4450 }
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
4453 for {
4454 x := v_1
4455 if v_2.Op != OpAMD64FlagGT_ULT {
4456 break
4457 }
4458 v.copyOf(x)
4459 return true
4460 }
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
4463 for {
4464 x := v_1
4465 if v_2.Op != OpAMD64FlagLT_ULT {
4466 break
4467 }
4468 v.copyOf(x)
4469 return true
4470 }
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
4473 for {
4474 y := v_0
4475 if v_2.Op != OpAMD64FlagLT_UGT {
4476 break
4477 }
4478 v.copyOf(y)
4479 return true
4480 }
4481 return false
4482 }
4483 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
4484 v_2 := v.Args[2]
4485 v_1 := v.Args[1]
4486 v_0 := v.Args[0]
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
4489 for {
4490 x := v_0
4491 y := v_1
4492 if v_2.Op != OpAMD64InvertFlags {
4493 break
4494 }
4495 cond := v_2.Args[0]
4496 v.reset(OpAMD64CMOVLGT)
4497 v.AddArg3(x, y, cond)
4498 return true
4499 }
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
4502 for {
4503 y := v_0
4504 if v_2.Op != OpAMD64FlagEQ {
4505 break
4506 }
4507 v.copyOf(y)
4508 return true
4509 }
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
4512 for {
4513 y := v_0
4514 if v_2.Op != OpAMD64FlagGT_UGT {
4515 break
4516 }
4517 v.copyOf(y)
4518 return true
4519 }
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
4522 for {
4523 y := v_0
4524 if v_2.Op != OpAMD64FlagGT_ULT {
4525 break
4526 }
4527 v.copyOf(y)
4528 return true
4529 }
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
4532 for {
4533 x := v_1
4534 if v_2.Op != OpAMD64FlagLT_ULT {
4535 break
4536 }
4537 v.copyOf(x)
4538 return true
4539 }
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
4542 for {
4543 x := v_1
4544 if v_2.Op != OpAMD64FlagLT_UGT {
4545 break
4546 }
4547 v.copyOf(x)
4548 return true
4549 }
4550 return false
4551 }
4552 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
4553 v_2 := v.Args[2]
4554 v_1 := v.Args[1]
4555 v_0 := v.Args[0]
4556 b := v.Block
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
4559 for {
4560 x := v_0
4561 y := v_1
4562 if v_2.Op != OpAMD64InvertFlags {
4563 break
4564 }
4565 cond := v_2.Args[0]
4566 v.reset(OpAMD64CMOVLNE)
4567 v.AddArg3(x, y, cond)
4568 return true
4569 }
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
4572 for {
4573 y := v_0
4574 if v_2.Op != OpAMD64FlagEQ {
4575 break
4576 }
4577 v.copyOf(y)
4578 return true
4579 }
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
4582 for {
4583 x := v_1
4584 if v_2.Op != OpAMD64FlagGT_UGT {
4585 break
4586 }
4587 v.copyOf(x)
4588 return true
4589 }
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
4592 for {
4593 x := v_1
4594 if v_2.Op != OpAMD64FlagGT_ULT {
4595 break
4596 }
4597 v.copyOf(x)
4598 return true
4599 }
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
4602 for {
4603 x := v_1
4604 if v_2.Op != OpAMD64FlagLT_ULT {
4605 break
4606 }
4607 v.copyOf(x)
4608 return true
4609 }
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
4612 for {
4613 x := v_1
4614 if v_2.Op != OpAMD64FlagLT_UGT {
4615 break
4616 }
4617 v.copyOf(x)
4618 return true
4619 }
	// match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
4622 for {
4623 x := v_0
4624 y := v_1
4625 if v_2.Op != OpAMD64TESTQ {
4626 break
4627 }
4628 _ = v_2.Args[1]
4629 v_2_0 := v_2.Args[0]
4630 v_2_1 := v_2.Args[1]
4631 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4632 s := v_2_0
4633 if s.Op != OpSelect0 {
4634 continue
4635 }
4636 blsr := s.Args[0]
4637 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4638 continue
4639 }
4640 v.reset(OpAMD64CMOVLNE)
4641 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4642 v0.AddArg(blsr)
4643 v.AddArg3(x, y, v0)
4644 return true
4645 }
4646 break
4647 }
	// match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
4650 for {
4651 x := v_0
4652 y := v_1
4653 if v_2.Op != OpAMD64TESTL {
4654 break
4655 }
4656 _ = v_2.Args[1]
4657 v_2_0 := v_2.Args[0]
4658 v_2_1 := v_2.Args[1]
4659 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4660 s := v_2_0
4661 if s.Op != OpSelect0 {
4662 continue
4663 }
4664 blsr := s.Args[0]
4665 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4666 continue
4667 }
4668 v.reset(OpAMD64CMOVLNE)
4669 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4670 v0.AddArg(blsr)
4671 v.AddArg3(x, y, v0)
4672 return true
4673 }
4674 break
4675 }
4676 return false
4677 }
4678 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
4679 v_2 := v.Args[2]
4680 v_1 := v.Args[1]
4681 v_0 := v.Args[0]
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
4684 for {
4685 x := v_0
4686 y := v_1
4687 if v_2.Op != OpAMD64InvertFlags {
4688 break
4689 }
4690 cond := v_2.Args[0]
4691 v.reset(OpAMD64CMOVQLS)
4692 v.AddArg3(x, y, cond)
4693 return true
4694 }
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
4697 for {
4698 x := v_1
4699 if v_2.Op != OpAMD64FlagEQ {
4700 break
4701 }
4702 v.copyOf(x)
4703 return true
4704 }
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
4707 for {
4708 x := v_1
4709 if v_2.Op != OpAMD64FlagGT_UGT {
4710 break
4711 }
4712 v.copyOf(x)
4713 return true
4714 }
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
4717 for {
4718 y := v_0
4719 if v_2.Op != OpAMD64FlagGT_ULT {
4720 break
4721 }
4722 v.copyOf(y)
4723 return true
4724 }
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
4727 for {
4728 y := v_0
4729 if v_2.Op != OpAMD64FlagLT_ULT {
4730 break
4731 }
4732 v.copyOf(y)
4733 return true
4734 }
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
4737 for {
4738 x := v_1
4739 if v_2.Op != OpAMD64FlagLT_UGT {
4740 break
4741 }
4742 v.copyOf(x)
4743 return true
4744 }
4745 return false
4746 }
4747 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
4748 v_2 := v.Args[2]
4749 v_1 := v.Args[1]
4750 v_0 := v.Args[0]
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
4753 for {
4754 x := v_0
4755 y := v_1
4756 if v_2.Op != OpAMD64InvertFlags {
4757 break
4758 }
4759 cond := v_2.Args[0]
4760 v.reset(OpAMD64CMOVQHI)
4761 v.AddArg3(x, y, cond)
4762 return true
4763 }
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
4766 for {
4767 y := v_0
4768 if v_2.Op != OpAMD64FlagEQ {
4769 break
4770 }
4771 v.copyOf(y)
4772 return true
4773 }
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
4776 for {
4777 y := v_0
4778 if v_2.Op != OpAMD64FlagGT_UGT {
4779 break
4780 }
4781 v.copyOf(y)
4782 return true
4783 }
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
4786 for {
4787 x := v_1
4788 if v_2.Op != OpAMD64FlagGT_ULT {
4789 break
4790 }
4791 v.copyOf(x)
4792 return true
4793 }
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
4796 for {
4797 x := v_1
4798 if v_2.Op != OpAMD64FlagLT_ULT {
4799 break
4800 }
4801 v.copyOf(x)
4802 return true
4803 }
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
4806 for {
4807 y := v_0
4808 if v_2.Op != OpAMD64FlagLT_UGT {
4809 break
4810 }
4811 v.copyOf(y)
4812 return true
4813 }
4814 return false
4815 }
4816 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
4817 v_2 := v.Args[2]
4818 v_1 := v.Args[1]
4819 v_0 := v.Args[0]
4820 b := v.Block
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
4823 for {
4824 x := v_0
4825 y := v_1
4826 if v_2.Op != OpAMD64InvertFlags {
4827 break
4828 }
4829 cond := v_2.Args[0]
4830 v.reset(OpAMD64CMOVQEQ)
4831 v.AddArg3(x, y, cond)
4832 return true
4833 }
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
4836 for {
4837 x := v_1
4838 if v_2.Op != OpAMD64FlagEQ {
4839 break
4840 }
4841 v.copyOf(x)
4842 return true
4843 }
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
4846 for {
4847 y := v_0
4848 if v_2.Op != OpAMD64FlagGT_UGT {
4849 break
4850 }
4851 v.copyOf(y)
4852 return true
4853 }
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
4856 for {
4857 y := v_0
4858 if v_2.Op != OpAMD64FlagGT_ULT {
4859 break
4860 }
4861 v.copyOf(y)
4862 return true
4863 }
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
4866 for {
4867 y := v_0
4868 if v_2.Op != OpAMD64FlagLT_ULT {
4869 break
4870 }
4871 v.copyOf(y)
4872 return true
4873 }
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
4876 for {
4877 y := v_0
4878 if v_2.Op != OpAMD64FlagLT_UGT {
4879 break
4880 }
4881 v.copyOf(y)
4882 return true
4883 }
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
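	// BSFQ sets ZF only when its input is zero; ORing in the nonzero constant c rules that out, so the not-equal operand x is always selected.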
4887 for {
4888 x := v_0
4889 if v_2.Op != OpSelect1 {
4890 break
4891 }
4892 v_2_0 := v_2.Args[0]
4893 if v_2_0.Op != OpAMD64BSFQ {
4894 break
4895 }
4896 v_2_0_0 := v_2_0.Args[0]
4897 if v_2_0_0.Op != OpAMD64ORQconst {
4898 break
4899 }
4900 c := auxIntToInt32(v_2_0_0.AuxInt)
4901 if !(c != 0) {
4902 break
4903 }
4904 v.copyOf(x)
4905 return true
4906 }
	// match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
4910 for {
4911 x := v_0
4912 if v_2.Op != OpSelect1 {
4913 break
4914 }
4915 v_2_0 := v_2.Args[0]
4916 if v_2_0.Op != OpAMD64BSRQ {
4917 break
4918 }
4919 v_2_0_0 := v_2_0.Args[0]
4920 if v_2_0_0.Op != OpAMD64ORQconst {
4921 break
4922 }
4923 c := auxIntToInt32(v_2_0_0.AuxInt)
4924 if !(c != 0) {
4925 break
4926 }
4927 v.copyOf(x)
4928 return true
4929 }
	// match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
4932 for {
4933 x := v_0
4934 y := v_1
4935 if v_2.Op != OpAMD64TESTQ {
4936 break
4937 }
4938 _ = v_2.Args[1]
4939 v_2_0 := v_2.Args[0]
4940 v_2_1 := v_2.Args[1]
4941 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4942 s := v_2_0
4943 if s.Op != OpSelect0 {
4944 continue
4945 }
4946 blsr := s.Args[0]
4947 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4948 continue
4949 }
4950 v.reset(OpAMD64CMOVQEQ)
4951 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4952 v0.AddArg(blsr)
4953 v.AddArg3(x, y, v0)
4954 return true
4955 }
4956 break
4957 }
	// match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
4960 for {
4961 x := v_0
4962 y := v_1
4963 if v_2.Op != OpAMD64TESTL {
4964 break
4965 }
4966 _ = v_2.Args[1]
4967 v_2_0 := v_2.Args[0]
4968 v_2_1 := v_2.Args[1]
4969 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4970 s := v_2_0
4971 if s.Op != OpSelect0 {
4972 continue
4973 }
4974 blsr := s.Args[0]
4975 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4976 continue
4977 }
4978 v.reset(OpAMD64CMOVQEQ)
4979 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4980 v0.AddArg(blsr)
4981 v.AddArg3(x, y, v0)
4982 return true
4983 }
4984 break
4985 }
4986 return false
4987 }
4988 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
4989 v_2 := v.Args[2]
4990 v_1 := v.Args[1]
4991 v_0 := v.Args[0]
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
4994 for {
4995 x := v_0
4996 y := v_1
4997 if v_2.Op != OpAMD64InvertFlags {
4998 break
4999 }
5000 cond := v_2.Args[0]
5001 v.reset(OpAMD64CMOVQLE)
5002 v.AddArg3(x, y, cond)
5003 return true
5004 }
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
5007 for {
5008 x := v_1
5009 if v_2.Op != OpAMD64FlagEQ {
5010 break
5011 }
5012 v.copyOf(x)
5013 return true
5014 }
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
5017 for {
5018 x := v_1
5019 if v_2.Op != OpAMD64FlagGT_UGT {
5020 break
5021 }
5022 v.copyOf(x)
5023 return true
5024 }
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
5027 for {
5028 x := v_1
5029 if v_2.Op != OpAMD64FlagGT_ULT {
5030 break
5031 }
5032 v.copyOf(x)
5033 return true
5034 }
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
5037 for {
5038 y := v_0
5039 if v_2.Op != OpAMD64FlagLT_ULT {
5040 break
5041 }
5042 v.copyOf(y)
5043 return true
5044 }
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
5047 for {
5048 y := v_0
5049 if v_2.Op != OpAMD64FlagLT_UGT {
5050 break
5051 }
5052 v.copyOf(y)
5053 return true
5054 }
5055 return false
5056 }
5057 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5058 v_2 := v.Args[2]
5059 v_1 := v.Args[1]
5060 v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
5063 for {
5064 x := v_0
5065 y := v_1
5066 if v_2.Op != OpAMD64InvertFlags {
5067 break
5068 }
5069 cond := v_2.Args[0]
5070 v.reset(OpAMD64CMOVQLT)
5071 v.AddArg3(x, y, cond)
5072 return true
5073 }
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
5076 for {
5077 y := v_0
5078 if v_2.Op != OpAMD64FlagEQ {
5079 break
5080 }
5081 v.copyOf(y)
5082 return true
5083 }
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
5086 for {
5087 x := v_1
5088 if v_2.Op != OpAMD64FlagGT_UGT {
5089 break
5090 }
5091 v.copyOf(x)
5092 return true
5093 }
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
5096 for {
5097 x := v_1
5098 if v_2.Op != OpAMD64FlagGT_ULT {
5099 break
5100 }
5101 v.copyOf(x)
5102 return true
5103 }
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
5106 for {
5107 y := v_0
5108 if v_2.Op != OpAMD64FlagLT_ULT {
5109 break
5110 }
5111 v.copyOf(y)
5112 return true
5113 }
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
5116 for {
5117 y := v_0
5118 if v_2.Op != OpAMD64FlagLT_UGT {
5119 break
5120 }
5121 v.copyOf(y)
5122 return true
5123 }
5124 return false
5125 }
5126 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5127 v_2 := v.Args[2]
5128 v_1 := v.Args[1]
5129 v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
5132 for {
5133 x := v_0
5134 y := v_1
5135 if v_2.Op != OpAMD64InvertFlags {
5136 break
5137 }
5138 cond := v_2.Args[0]
5139 v.reset(OpAMD64CMOVQCS)
5140 v.AddArg3(x, y, cond)
5141 return true
5142 }
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
5145 for {
5146 y := v_0
5147 if v_2.Op != OpAMD64FlagEQ {
5148 break
5149 }
5150 v.copyOf(y)
5151 return true
5152 }
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
5155 for {
5156 x := v_1
5157 if v_2.Op != OpAMD64FlagGT_UGT {
5158 break
5159 }
5160 v.copyOf(x)
5161 return true
5162 }
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
5165 for {
5166 y := v_0
5167 if v_2.Op != OpAMD64FlagGT_ULT {
5168 break
5169 }
5170 v.copyOf(y)
5171 return true
5172 }
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
5175 for {
5176 y := v_0
5177 if v_2.Op != OpAMD64FlagLT_ULT {
5178 break
5179 }
5180 v.copyOf(y)
5181 return true
5182 }
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
5185 for {
5186 x := v_1
5187 if v_2.Op != OpAMD64FlagLT_UGT {
5188 break
5189 }
5190 v.copyOf(x)
5191 return true
5192 }
5193 return false
5194 }
5195 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5196 v_2 := v.Args[2]
5197 v_1 := v.Args[1]
5198 v_0 := v.Args[0]
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
5201 for {
5202 x := v_0
5203 y := v_1
5204 if v_2.Op != OpAMD64InvertFlags {
5205 break
5206 }
5207 cond := v_2.Args[0]
5208 v.reset(OpAMD64CMOVQGE)
5209 v.AddArg3(x, y, cond)
5210 return true
5211 }
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
5214 for {
5215 x := v_1
5216 if v_2.Op != OpAMD64FlagEQ {
5217 break
5218 }
5219 v.copyOf(x)
5220 return true
5221 }
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
5224 for {
5225 y := v_0
5226 if v_2.Op != OpAMD64FlagGT_UGT {
5227 break
5228 }
5229 v.copyOf(y)
5230 return true
5231 }
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
5234 for {
5235 y := v_0
5236 if v_2.Op != OpAMD64FlagGT_ULT {
5237 break
5238 }
5239 v.copyOf(y)
5240 return true
5241 }
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
5244 for {
5245 x := v_1
5246 if v_2.Op != OpAMD64FlagLT_ULT {
5247 break
5248 }
5249 v.copyOf(x)
5250 return true
5251 }
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
5254 for {
5255 x := v_1
5256 if v_2.Op != OpAMD64FlagLT_UGT {
5257 break
5258 }
5259 v.copyOf(x)
5260 return true
5261 }
5262 return false
5263 }
5264 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5265 v_2 := v.Args[2]
5266 v_1 := v.Args[1]
5267 v_0 := v.Args[0]
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
5270 for {
5271 x := v_0
5272 y := v_1
5273 if v_2.Op != OpAMD64InvertFlags {
5274 break
5275 }
5276 cond := v_2.Args[0]
5277 v.reset(OpAMD64CMOVQCC)
5278 v.AddArg3(x, y, cond)
5279 return true
5280 }
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
5283 for {
5284 x := v_1
5285 if v_2.Op != OpAMD64FlagEQ {
5286 break
5287 }
5288 v.copyOf(x)
5289 return true
5290 }
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
5293 for {
5294 y := v_0
5295 if v_2.Op != OpAMD64FlagGT_UGT {
5296 break
5297 }
5298 v.copyOf(y)
5299 return true
5300 }
5301 // match: (CMOVQLS _ x (FlagGT_ULT))
5302 // result: x
5303 for {
5304 x := v_1
5305 if v_2.Op != OpAMD64FlagGT_ULT {
5306 break
5307 }
5308 v.copyOf(x)
5309 return true
5310 }
5311 // match: (CMOVQLS _ x (FlagLT_ULT))
5312 // result: x
5313 for {
5314 x := v_1
5315 if v_2.Op != OpAMD64FlagLT_ULT {
5316 break
5317 }
5318 v.copyOf(x)
5319 return true
5320 }
5321 // match: (CMOVQLS y _ (FlagLT_UGT))
5322 // result: y
5323 for {
5324 y := v_0
5325 if v_2.Op != OpAMD64FlagLT_UGT {
5326 break
5327 }
5328 v.copyOf(y)
5329 return true
5330 }
5331 return false
5332 }
5333 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5334 v_2 := v.Args[2]
5335 v_1 := v.Args[1]
5336 v_0 := v.Args[0]
5337 // match: (CMOVQLT x y (InvertFlags cond))
5338 // result: (CMOVQGT x y cond)
5339 for {
5340 x := v_0
5341 y := v_1
5342 if v_2.Op != OpAMD64InvertFlags {
5343 break
5344 }
5345 cond := v_2.Args[0]
5346 v.reset(OpAMD64CMOVQGT)
5347 v.AddArg3(x, y, cond)
5348 return true
5349 }
5350 // match: (CMOVQLT y _ (FlagEQ))
5351 // result: y
5352 for {
5353 y := v_0
5354 if v_2.Op != OpAMD64FlagEQ {
5355 break
5356 }
5357 v.copyOf(y)
5358 return true
5359 }
5360 // match: (CMOVQLT y _ (FlagGT_UGT))
5361 // result: y
5362 for {
5363 y := v_0
5364 if v_2.Op != OpAMD64FlagGT_UGT {
5365 break
5366 }
5367 v.copyOf(y)
5368 return true
5369 }
5370 // match: (CMOVQLT y _ (FlagGT_ULT))
5371 // result: y
5372 for {
5373 y := v_0
5374 if v_2.Op != OpAMD64FlagGT_ULT {
5375 break
5376 }
5377 v.copyOf(y)
5378 return true
5379 }
5380 // match: (CMOVQLT _ x (FlagLT_ULT))
5381 // result: x
5382 for {
5383 x := v_1
5384 if v_2.Op != OpAMD64FlagLT_ULT {
5385 break
5386 }
5387 v.copyOf(x)
5388 return true
5389 }
5390 // match: (CMOVQLT _ x (FlagLT_UGT))
5391 // result: x
5392 for {
5393 x := v_1
5394 if v_2.Op != OpAMD64FlagLT_UGT {
5395 break
5396 }
5397 v.copyOf(x)
5398 return true
5399 }
5400 return false
5401 }
5402 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
5403 v_2 := v.Args[2]
5404 v_1 := v.Args[1]
5405 v_0 := v.Args[0]
5406 b := v.Block
5407 // match: (CMOVQNE x y (InvertFlags cond))
5408 // result: (CMOVQNE x y cond)
5409 for {
5410 x := v_0
5411 y := v_1
5412 if v_2.Op != OpAMD64InvertFlags {
5413 break
5414 }
5415 cond := v_2.Args[0]
5416 v.reset(OpAMD64CMOVQNE)
5417 v.AddArg3(x, y, cond)
5418 return true
5419 }
5420 // match: (CMOVQNE y _ (FlagEQ))
5421 // result: y
5422 for {
5423 y := v_0
5424 if v_2.Op != OpAMD64FlagEQ {
5425 break
5426 }
5427 v.copyOf(y)
5428 return true
5429 }
5430 // match: (CMOVQNE _ x (FlagGT_UGT))
5431 // result: x
5432 for {
5433 x := v_1
5434 if v_2.Op != OpAMD64FlagGT_UGT {
5435 break
5436 }
5437 v.copyOf(x)
5438 return true
5439 }
5440 // match: (CMOVQNE _ x (FlagGT_ULT))
5441 // result: x
5442 for {
5443 x := v_1
5444 if v_2.Op != OpAMD64FlagGT_ULT {
5445 break
5446 }
5447 v.copyOf(x)
5448 return true
5449 }
5450 // match: (CMOVQNE _ x (FlagLT_ULT))
5451 // result: x
5452 for {
5453 x := v_1
5454 if v_2.Op != OpAMD64FlagLT_ULT {
5455 break
5456 }
5457 v.copyOf(x)
5458 return true
5459 }
5460 // match: (CMOVQNE _ x (FlagLT_UGT))
5461 // result: x
5462 for {
5463 x := v_1
5464 if v_2.Op != OpAMD64FlagLT_UGT {
5465 break
5466 }
5467 v.copyOf(x)
5468 return true
5469 }
5470 // match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
5471 // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
5472 for {
5473 x := v_0
5474 y := v_1
5475 if v_2.Op != OpAMD64TESTQ {
5476 break
5477 }
5478 _ = v_2.Args[1]
5479 v_2_0 := v_2.Args[0]
5480 v_2_1 := v_2.Args[1]
5481 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5482 s := v_2_0
5483 if s.Op != OpSelect0 {
5484 continue
5485 }
5486 blsr := s.Args[0]
5487 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
5488 continue
5489 }
5490 v.reset(OpAMD64CMOVQNE)
5491 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5492 v0.AddArg(blsr)
5493 v.AddArg3(x, y, v0)
5494 return true
5495 }
5496 break
5497 }
5498 // match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
5499 // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
5500 for {
5501 x := v_0
5502 y := v_1
5503 if v_2.Op != OpAMD64TESTL {
5504 break
5505 }
5506 _ = v_2.Args[1]
5507 v_2_0 := v_2.Args[0]
5508 v_2_1 := v_2.Args[1]
5509 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5510 s := v_2_0
5511 if s.Op != OpSelect0 {
5512 continue
5513 }
5514 blsr := s.Args[0]
5515 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
5516 continue
5517 }
5518 v.reset(OpAMD64CMOVQNE)
5519 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5520 v0.AddArg(blsr)
5521 v.AddArg3(x, y, v0)
5522 return true
5523 }
5524 break
5525 }
5526 return false
5527 }
5528 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
5529 v_2 := v.Args[2]
5530 v_1 := v.Args[1]
5531 v_0 := v.Args[0]
5532 // match: (CMOVWCC x y (InvertFlags cond))
5533 // result: (CMOVWLS x y cond)
5534 for {
5535 x := v_0
5536 y := v_1
5537 if v_2.Op != OpAMD64InvertFlags {
5538 break
5539 }
5540 cond := v_2.Args[0]
5541 v.reset(OpAMD64CMOVWLS)
5542 v.AddArg3(x, y, cond)
5543 return true
5544 }
5545 // match: (CMOVWCC _ x (FlagEQ))
5546 // result: x
5547 for {
5548 x := v_1
5549 if v_2.Op != OpAMD64FlagEQ {
5550 break
5551 }
5552 v.copyOf(x)
5553 return true
5554 }
5555 // match: (CMOVWCC _ x (FlagGT_UGT))
5556 // result: x
5557 for {
5558 x := v_1
5559 if v_2.Op != OpAMD64FlagGT_UGT {
5560 break
5561 }
5562 v.copyOf(x)
5563 return true
5564 }
5565 // match: (CMOVWCC y _ (FlagGT_ULT))
5566 // result: y
5567 for {
5568 y := v_0
5569 if v_2.Op != OpAMD64FlagGT_ULT {
5570 break
5571 }
5572 v.copyOf(y)
5573 return true
5574 }
5575 // match: (CMOVWCC y _ (FlagLT_ULT))
5576 // result: y
5577 for {
5578 y := v_0
5579 if v_2.Op != OpAMD64FlagLT_ULT {
5580 break
5581 }
5582 v.copyOf(y)
5583 return true
5584 }
5585 // match: (CMOVWCC _ x (FlagLT_UGT))
5586 // result: x
5587 for {
5588 x := v_1
5589 if v_2.Op != OpAMD64FlagLT_UGT {
5590 break
5591 }
5592 v.copyOf(x)
5593 return true
5594 }
5595 return false
5596 }
5597 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
5598 v_2 := v.Args[2]
5599 v_1 := v.Args[1]
5600 v_0 := v.Args[0]
5601 // match: (CMOVWCS x y (InvertFlags cond))
5602 // result: (CMOVWHI x y cond)
5603 for {
5604 x := v_0
5605 y := v_1
5606 if v_2.Op != OpAMD64InvertFlags {
5607 break
5608 }
5609 cond := v_2.Args[0]
5610 v.reset(OpAMD64CMOVWHI)
5611 v.AddArg3(x, y, cond)
5612 return true
5613 }
5614 // match: (CMOVWCS y _ (FlagEQ))
5615 // result: y
5616 for {
5617 y := v_0
5618 if v_2.Op != OpAMD64FlagEQ {
5619 break
5620 }
5621 v.copyOf(y)
5622 return true
5623 }
5624 // match: (CMOVWCS y _ (FlagGT_UGT))
5625 // result: y
5626 for {
5627 y := v_0
5628 if v_2.Op != OpAMD64FlagGT_UGT {
5629 break
5630 }
5631 v.copyOf(y)
5632 return true
5633 }
5634 // match: (CMOVWCS _ x (FlagGT_ULT))
5635 // result: x
5636 for {
5637 x := v_1
5638 if v_2.Op != OpAMD64FlagGT_ULT {
5639 break
5640 }
5641 v.copyOf(x)
5642 return true
5643 }
5644 // match: (CMOVWCS _ x (FlagLT_ULT))
5645 // result: x
5646 for {
5647 x := v_1
5648 if v_2.Op != OpAMD64FlagLT_ULT {
5649 break
5650 }
5651 v.copyOf(x)
5652 return true
5653 }
5654 // match: (CMOVWCS y _ (FlagLT_UGT))
5655 // result: y
5656 for {
5657 y := v_0
5658 if v_2.Op != OpAMD64FlagLT_UGT {
5659 break
5660 }
5661 v.copyOf(y)
5662 return true
5663 }
5664 return false
5665 }
5666 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
5667 v_2 := v.Args[2]
5668 v_1 := v.Args[1]
5669 v_0 := v.Args[0]
5670 // match: (CMOVWEQ x y (InvertFlags cond))
5671 // result: (CMOVWEQ x y cond)
5672 for {
5673 x := v_0
5674 y := v_1
5675 if v_2.Op != OpAMD64InvertFlags {
5676 break
5677 }
5678 cond := v_2.Args[0]
5679 v.reset(OpAMD64CMOVWEQ)
5680 v.AddArg3(x, y, cond)
5681 return true
5682 }
5683 // match: (CMOVWEQ _ x (FlagEQ))
5684 // result: x
5685 for {
5686 x := v_1
5687 if v_2.Op != OpAMD64FlagEQ {
5688 break
5689 }
5690 v.copyOf(x)
5691 return true
5692 }
5693 // match: (CMOVWEQ y _ (FlagGT_UGT))
5694 // result: y
5695 for {
5696 y := v_0
5697 if v_2.Op != OpAMD64FlagGT_UGT {
5698 break
5699 }
5700 v.copyOf(y)
5701 return true
5702 }
5703 // match: (CMOVWEQ y _ (FlagGT_ULT))
5704 // result: y
5705 for {
5706 y := v_0
5707 if v_2.Op != OpAMD64FlagGT_ULT {
5708 break
5709 }
5710 v.copyOf(y)
5711 return true
5712 }
5713 // match: (CMOVWEQ y _ (FlagLT_ULT))
5714 // result: y
5715 for {
5716 y := v_0
5717 if v_2.Op != OpAMD64FlagLT_ULT {
5718 break
5719 }
5720 v.copyOf(y)
5721 return true
5722 }
5723 // match: (CMOVWEQ y _ (FlagLT_UGT))
5724 // result: y
5725 for {
5726 y := v_0
5727 if v_2.Op != OpAMD64FlagLT_UGT {
5728 break
5729 }
5730 v.copyOf(y)
5731 return true
5732 }
5733 return false
5734 }
5735 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
5736 v_2 := v.Args[2]
5737 v_1 := v.Args[1]
5738 v_0 := v.Args[0]
5739 // match: (CMOVWGE x y (InvertFlags cond))
5740 // result: (CMOVWLE x y cond)
5741 for {
5742 x := v_0
5743 y := v_1
5744 if v_2.Op != OpAMD64InvertFlags {
5745 break
5746 }
5747 cond := v_2.Args[0]
5748 v.reset(OpAMD64CMOVWLE)
5749 v.AddArg3(x, y, cond)
5750 return true
5751 }
5752 // match: (CMOVWGE _ x (FlagEQ))
5753 // result: x
5754 for {
5755 x := v_1
5756 if v_2.Op != OpAMD64FlagEQ {
5757 break
5758 }
5759 v.copyOf(x)
5760 return true
5761 }
5762 // match: (CMOVWGE _ x (FlagGT_UGT))
5763 // result: x
5764 for {
5765 x := v_1
5766 if v_2.Op != OpAMD64FlagGT_UGT {
5767 break
5768 }
5769 v.copyOf(x)
5770 return true
5771 }
5772 // match: (CMOVWGE _ x (FlagGT_ULT))
5773 // result: x
5774 for {
5775 x := v_1
5776 if v_2.Op != OpAMD64FlagGT_ULT {
5777 break
5778 }
5779 v.copyOf(x)
5780 return true
5781 }
5782 // match: (CMOVWGE y _ (FlagLT_ULT))
5783 // result: y
5784 for {
5785 y := v_0
5786 if v_2.Op != OpAMD64FlagLT_ULT {
5787 break
5788 }
5789 v.copyOf(y)
5790 return true
5791 }
5792 // match: (CMOVWGE y _ (FlagLT_UGT))
5793 // result: y
5794 for {
5795 y := v_0
5796 if v_2.Op != OpAMD64FlagLT_UGT {
5797 break
5798 }
5799 v.copyOf(y)
5800 return true
5801 }
5802 return false
5803 }
5804 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
5805 v_2 := v.Args[2]
5806 v_1 := v.Args[1]
5807 v_0 := v.Args[0]
5808 // match: (CMOVWGT x y (InvertFlags cond))
5809 // result: (CMOVWLT x y cond)
5810 for {
5811 x := v_0
5812 y := v_1
5813 if v_2.Op != OpAMD64InvertFlags {
5814 break
5815 }
5816 cond := v_2.Args[0]
5817 v.reset(OpAMD64CMOVWLT)
5818 v.AddArg3(x, y, cond)
5819 return true
5820 }
5821 // match: (CMOVWGT y _ (FlagEQ))
5822 // result: y
5823 for {
5824 y := v_0
5825 if v_2.Op != OpAMD64FlagEQ {
5826 break
5827 }
5828 v.copyOf(y)
5829 return true
5830 }
5831 // match: (CMOVWGT _ x (FlagGT_UGT))
5832 // result: x
5833 for {
5834 x := v_1
5835 if v_2.Op != OpAMD64FlagGT_UGT {
5836 break
5837 }
5838 v.copyOf(x)
5839 return true
5840 }
5841 // match: (CMOVWGT _ x (FlagGT_ULT))
5842 // result: x
5843 for {
5844 x := v_1
5845 if v_2.Op != OpAMD64FlagGT_ULT {
5846 break
5847 }
5848 v.copyOf(x)
5849 return true
5850 }
5851 // match: (CMOVWGT y _ (FlagLT_ULT))
5852 // result: y
5853 for {
5854 y := v_0
5855 if v_2.Op != OpAMD64FlagLT_ULT {
5856 break
5857 }
5858 v.copyOf(y)
5859 return true
5860 }
5861 // match: (CMOVWGT y _ (FlagLT_UGT))
5862 // result: y
5863 for {
5864 y := v_0
5865 if v_2.Op != OpAMD64FlagLT_UGT {
5866 break
5867 }
5868 v.copyOf(y)
5869 return true
5870 }
5871 return false
5872 }
5873 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
5874 v_2 := v.Args[2]
5875 v_1 := v.Args[1]
5876 v_0 := v.Args[0]
5877 // match: (CMOVWHI x y (InvertFlags cond))
5878 // result: (CMOVWCS x y cond)
5879 for {
5880 x := v_0
5881 y := v_1
5882 if v_2.Op != OpAMD64InvertFlags {
5883 break
5884 }
5885 cond := v_2.Args[0]
5886 v.reset(OpAMD64CMOVWCS)
5887 v.AddArg3(x, y, cond)
5888 return true
5889 // match: (CMOVWHI y _ (FlagEQ))
5890 // result: y
5891
5892 for {
5893 y := v_0
5894 if v_2.Op != OpAMD64FlagEQ {
5895 break
5896 }
5897 v.copyOf(y)
5898 return true
5899 // match: (CMOVWHI _ x (FlagGT_UGT))
5900 // result: x
5901
5902 for {
5903 x := v_1
5904 if v_2.Op != OpAMD64FlagGT_UGT {
5905 break
5906 }
5907 v.copyOf(x)
5908 return true
5909 // match: (CMOVWHI y _ (FlagGT_ULT))
5910 // result: y
5911
5912 for {
5913 y := v_0
5914 if v_2.Op != OpAMD64FlagGT_ULT {
5915 break
5916 }
5917 v.copyOf(y)
5918 return true
5919 // match: (CMOVWHI y _ (FlagLT_ULT))
5920 // result: y
5921
5922 for {
5923 y := v_0
5924 if v_2.Op != OpAMD64FlagLT_ULT {
5925 break
5926 }
5927 v.copyOf(y)
5928 return true
5929 }
5930 // match: (CMOVWHI _ x (FlagLT_UGT))
5931 // result: x
5932 for {
5933 x := v_1
5934 if v_2.Op != OpAMD64FlagLT_UGT {
5935 break
5936 }
5937 v.copyOf(x)
5938 return true
5939 }
5940 return false
5941 }
5942 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
5943 v_2 := v.Args[2]
5944 v_1 := v.Args[1]
5945 v_0 := v.Args[0]
5946 // match: (CMOVWLE x y (InvertFlags cond))
5947 // result: (CMOVWGE x y cond)
5948 for {
5949 x := v_0
5950 y := v_1
5951 if v_2.Op != OpAMD64InvertFlags {
5952 break
5953 }
5954 cond := v_2.Args[0]
5955 v.reset(OpAMD64CMOVWGE)
5956 v.AddArg3(x, y, cond)
5957 return true
5958 // match: (CMOVWLE _ x (FlagEQ))
5959 // result: x
5960
5961 for {
5962 x := v_1
5963 if v_2.Op != OpAMD64FlagEQ {
5964 break
5965 }
5966 v.copyOf(x)
5967 return true
5968 // match: (CMOVWLE y _ (FlagGT_UGT))
5969 // result: y
5970
5971 for {
5972 y := v_0
5973 if v_2.Op != OpAMD64FlagGT_UGT {
5974 break
5975 }
5976 v.copyOf(y)
5977 return true
5978 }
5979 // match: (CMOVWLE y _ (FlagGT_ULT))
5980 // result: y
5981 for {
5982 y := v_0
5983 if v_2.Op != OpAMD64FlagGT_ULT {
5984 break
5985 }
5986 v.copyOf(y)
5987 return true
5988 }
5989 // match: (CMOVWLE _ x (FlagLT_ULT))
5990 // result: x
5991 for {
5992 x := v_1
5993 if v_2.Op != OpAMD64FlagLT_ULT {
5994 break
5995 }
5996 v.copyOf(x)
5997 return true
5998 }
5999 // match: (CMOVWLE _ x (FlagLT_UGT))
6000 // result: x
6001 for {
6002 x := v_1
6003 if v_2.Op != OpAMD64FlagLT_UGT {
6004 break
6005 }
6006 v.copyOf(x)
6007 return true
6008 }
6009 return false
6010 }
6011 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6012 v_2 := v.Args[2]
6013 v_1 := v.Args[1]
6014 v_0 := v.Args[0]
6015 // match: (CMOVWLS x y (InvertFlags cond))
6016 // result: (CMOVWCC x y cond)
6017 for {
6018 x := v_0
6019 y := v_1
6020 if v_2.Op != OpAMD64InvertFlags {
6021 break
6022 }
6023 cond := v_2.Args[0]
6024 v.reset(OpAMD64CMOVWCC)
6025 v.AddArg3(x, y, cond)
6026 return true
6027 // match: (CMOVWLS _ x (FlagEQ))
6028 // result: x
6029
6030 for {
6031 x := v_1
6032 if v_2.Op != OpAMD64FlagEQ {
6033 break
6034 }
6035 v.copyOf(x)
6036 return true
6037 }
6038 // match: (CMOVWLS y _ (FlagGT_UGT))
6039 // result: y
6040 for {
6041 y := v_0
6042 if v_2.Op != OpAMD64FlagGT_UGT {
6043 break
6044 }
6045 v.copyOf(y)
6046 return true
6047 }
6048 // match: (CMOVWLS _ x (FlagGT_ULT))
6049 // result: x
6050 for {
6051 x := v_1
6052 if v_2.Op != OpAMD64FlagGT_ULT {
6053 break
6054 }
6055 v.copyOf(x)
6056 return true
6057 }
6058 // match: (CMOVWLS _ x (FlagLT_ULT))
6059 // result: x
6060 for {
6061 x := v_1
6062 if v_2.Op != OpAMD64FlagLT_ULT {
6063 break
6064 }
6065 v.copyOf(x)
6066 return true
6067 }
6068 // match: (CMOVWLS y _ (FlagLT_UGT))
6069 // result: y
6070 for {
6071 y := v_0
6072 if v_2.Op != OpAMD64FlagLT_UGT {
6073 break
6074 }
6075 v.copyOf(y)
6076 return true
6077 }
6078 return false
6079 }
6080 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6081 v_2 := v.Args[2]
6082 v_1 := v.Args[1]
6083 v_0 := v.Args[0]
6084 // match: (CMOVWLT x y (InvertFlags cond))
6085 // result: (CMOVWGT x y cond)
6086 for {
6087 x := v_0
6088 y := v_1
6089 if v_2.Op != OpAMD64InvertFlags {
6090 break
6091 }
6092 cond := v_2.Args[0]
6093 v.reset(OpAMD64CMOVWGT)
6094 v.AddArg3(x, y, cond)
6095 return true
6096 // match: (CMOVWLT y _ (FlagEQ))
6097 // result: y
6098
6099 for {
6100 y := v_0
6101 if v_2.Op != OpAMD64FlagEQ {
6102 break
6103 }
6104 v.copyOf(y)
6105 return true
6106 // match: (CMOVWLT y _ (FlagGT_UGT))
6107 // result: y
6108
6109 for {
6110 y := v_0
6111 if v_2.Op != OpAMD64FlagGT_UGT {
6112 break
6113 }
6114 v.copyOf(y)
6115 return true
6116 }
6117 // match: (CMOVWLT y _ (FlagGT_ULT))
6118 // result: y
6119 for {
6120 y := v_0
6121 if v_2.Op != OpAMD64FlagGT_ULT {
6122 break
6123 }
6124 v.copyOf(y)
6125 return true
6126 // match: (CMOVWLT _ x (FlagLT_ULT))
6127 // result: x
6128
6129 for {
6130 x := v_1
6131 if v_2.Op != OpAMD64FlagLT_ULT {
6132 break
6133 }
6134 v.copyOf(x)
6135 return true
6136 }
6137 // match: (CMOVWLT _ x (FlagLT_UGT))
6138 // result: x
6139 for {
6140 x := v_1
6141 if v_2.Op != OpAMD64FlagLT_UGT {
6142 break
6143 }
6144 v.copyOf(x)
6145 return true
6146 }
6147 return false
6148 }
6149 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6150 v_2 := v.Args[2]
6151 v_1 := v.Args[1]
6152 v_0 := v.Args[0]
6153 // match: (CMOVWNE x y (InvertFlags cond))
6154 // result: (CMOVWNE x y cond)
6155 for {
6156 x := v_0
6157 y := v_1
6158 if v_2.Op != OpAMD64InvertFlags {
6159 break
6160 }
6161 cond := v_2.Args[0]
6162 v.reset(OpAMD64CMOVWNE)
6163 v.AddArg3(x, y, cond)
6164 return true
6165 }
6166 // match: (CMOVWNE y _ (FlagEQ))
6167 // result: y
6168 for {
6169 y := v_0
6170 if v_2.Op != OpAMD64FlagEQ {
6171 break
6172 }
6173 v.copyOf(y)
6174 return true
6175 }
6176 // match: (CMOVWNE _ x (FlagGT_UGT))
6177 // result: x
6178 for {
6179 x := v_1
6180 if v_2.Op != OpAMD64FlagGT_UGT {
6181 break
6182 }
6183 v.copyOf(x)
6184 return true
6185 }
6186 // match: (CMOVWNE _ x (FlagGT_ULT))
6187 // result: x
6188 for {
6189 x := v_1
6190 if v_2.Op != OpAMD64FlagGT_ULT {
6191 break
6192 }
6193 v.copyOf(x)
6194 return true
6195 }
6196 // match: (CMOVWNE _ x (FlagLT_ULT))
6197 // result: x
6198 for {
6199 x := v_1
6200 if v_2.Op != OpAMD64FlagLT_ULT {
6201 break
6202 }
6203 v.copyOf(x)
6204 return true
6205 }
6206 // match: (CMOVWNE _ x (FlagLT_UGT))
6207 // result: x
6208 for {
6209 x := v_1
6210 if v_2.Op != OpAMD64FlagLT_UGT {
6211 break
6212 }
6213 v.copyOf(x)
6214 return true
6215 }
6216 return false
6217 }
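// Editorial note (not generated): the CMP* rewrites below canonicalize
// comparisons. A constant operand folds into the AuxInt of a CMP*const op; a
// constant on the left is moved to the right, with the swap recorded by
// wrapping the result in InvertFlags. canonLessThan imposes a canonical
// operand order so that equivalent comparisons become syntactically
// identical, and a single-use load feeding the compare merges into a
// CMP*load when canMergeLoad reports the combination is safe.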
6218 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6219 v_1 := v.Args[1]
6220 v_0 := v.Args[0]
6221 b := v.Block
6222 // match: (CMPB x (MOVLconst [c]))
6223 // result: (CMPBconst x [int8(c)])
6224 for {
6225 x := v_0
6226 if v_1.Op != OpAMD64MOVLconst {
6227 break
6228 }
6229 c := auxIntToInt32(v_1.AuxInt)
6230 v.reset(OpAMD64CMPBconst)
6231 v.AuxInt = int8ToAuxInt(int8(c))
6232 v.AddArg(x)
6233 return true
6234 }
6235 // match: (CMPB (MOVLconst [c]) x)
6236 // result: (InvertFlags (CMPBconst x [int8(c)]))
6237 for {
6238 if v_0.Op != OpAMD64MOVLconst {
6239 break
6240 }
6241 c := auxIntToInt32(v_0.AuxInt)
6242 x := v_1
6243 v.reset(OpAMD64InvertFlags)
6244 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6245 v0.AuxInt = int8ToAuxInt(int8(c))
6246 v0.AddArg(x)
6247 v.AddArg(v0)
6248 return true
6249 }
6250 // match: (CMPB x y)
6251 // cond: canonLessThan(x,y)
6252 // result: (InvertFlags (CMPB y x))
6253 for {
6254 x := v_0
6255 y := v_1
6256 if !(canonLessThan(x, y)) {
6257 break
6258 }
6259 v.reset(OpAMD64InvertFlags)
6260 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6261 v0.AddArg2(y, x)
6262 v.AddArg(v0)
6263 return true
6264 }
6265 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
6266 // cond: canMergeLoad(v, l) && clobber(l)
6267 // result: (CMPBload {sym} [off] ptr x mem)
6268 for {
6269 l := v_0
6270 if l.Op != OpAMD64MOVBload {
6271 break
6272 }
6273 off := auxIntToInt32(l.AuxInt)
6274 sym := auxToSym(l.Aux)
6275 mem := l.Args[1]
6276 ptr := l.Args[0]
6277 x := v_1
6278 if !(canMergeLoad(v, l) && clobber(l)) {
6279 break
6280 }
6281 v.reset(OpAMD64CMPBload)
6282 v.AuxInt = int32ToAuxInt(off)
6283 v.Aux = symToAux(sym)
6284 v.AddArg3(ptr, x, mem)
6285 return true
6286 }
6287 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
6288 // cond: canMergeLoad(v, l) && clobber(l)
6289 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
6290 for {
6291 x := v_0
6292 l := v_1
6293 if l.Op != OpAMD64MOVBload {
6294 break
6295 }
6296 off := auxIntToInt32(l.AuxInt)
6297 sym := auxToSym(l.Aux)
6298 mem := l.Args[1]
6299 ptr := l.Args[0]
6300 if !(canMergeLoad(v, l) && clobber(l)) {
6301 break
6302 }
6303 v.reset(OpAMD64InvertFlags)
6304 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6305 v0.AuxInt = int32ToAuxInt(off)
6306 v0.Aux = symToAux(sym)
6307 v0.AddArg3(ptr, x, mem)
6308 v.AddArg(v0)
6309 return true
6310 }
6311 return false
6312 }
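// Editorial note (not generated): comparing two known constants needs no
// instruction at all. The five flag ops (FlagEQ, FlagLT_ULT, FlagLT_UGT,
// FlagGT_ULT, FlagGT_UGT) record the signed and the unsigned outcome of a
// compare simultaneously, which is why each constant-versus-constant rule
// below tests both the signed (int8, int16, ...) and the unsigned (uint8,
// uint16, ...) relation before choosing one.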
6313 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6314 v_0 := v.Args[0]
6315 b := v.Block
6316 // match: (CMPBconst (MOVLconst [x]) [y])
6317 // cond: int8(x)==y
6318 // result: (FlagEQ)
6319 for {
6320 y := auxIntToInt8(v.AuxInt)
6321 if v_0.Op != OpAMD64MOVLconst {
6322 break
6323 }
6324 x := auxIntToInt32(v_0.AuxInt)
6325 if !(int8(x) == y) {
6326 break
6327 }
6328 v.reset(OpAMD64FlagEQ)
6329 return true
6330 }
6331 // match: (CMPBconst (MOVLconst [x]) [y])
6332 // cond: int8(x)<y && uint8(x)<uint8(y)
6333 // result: (FlagLT_ULT)
6334 for {
6335 y := auxIntToInt8(v.AuxInt)
6336 if v_0.Op != OpAMD64MOVLconst {
6337 break
6338 }
6339 x := auxIntToInt32(v_0.AuxInt)
6340 if !(int8(x) < y && uint8(x) < uint8(y)) {
6341 break
6342 }
6343 v.reset(OpAMD64FlagLT_ULT)
6344 return true
6345 }
6346 // match: (CMPBconst (MOVLconst [x]) [y])
6347 // cond: int8(x)<y && uint8(x)>uint8(y)
6348 // result: (FlagLT_UGT)
6349 for {
6350 y := auxIntToInt8(v.AuxInt)
6351 if v_0.Op != OpAMD64MOVLconst {
6352 break
6353 }
6354 x := auxIntToInt32(v_0.AuxInt)
6355 if !(int8(x) < y && uint8(x) > uint8(y)) {
6356 break
6357 }
6358 v.reset(OpAMD64FlagLT_UGT)
6359 return true
6360 }
6361 // match: (CMPBconst (MOVLconst [x]) [y])
6362 // cond: int8(x)>y && uint8(x)<uint8(y)
6363 // result: (FlagGT_ULT)
6364 for {
6365 y := auxIntToInt8(v.AuxInt)
6366 if v_0.Op != OpAMD64MOVLconst {
6367 break
6368 }
6369 x := auxIntToInt32(v_0.AuxInt)
6370 if !(int8(x) > y && uint8(x) < uint8(y)) {
6371 break
6372 }
6373 v.reset(OpAMD64FlagGT_ULT)
6374 return true
6375 }
6376 // match: (CMPBconst (MOVLconst [x]) [y])
6377 // cond: int8(x)>y && uint8(x)>uint8(y)
6378 // result: (FlagGT_UGT)
6379 for {
6380 y := auxIntToInt8(v.AuxInt)
6381 if v_0.Op != OpAMD64MOVLconst {
6382 break
6383 }
6384 x := auxIntToInt32(v_0.AuxInt)
6385 if !(int8(x) > y && uint8(x) > uint8(y)) {
6386 break
6387 }
6388 v.reset(OpAMD64FlagGT_UGT)
6389 return true
6390 }
6391 // match: (CMPBconst (ANDLconst _ [m]) [n])
6392 // cond: 0 <= int8(m) && int8(m) < n
6393 // result: (FlagLT_ULT)
6394 for {
6395 n := auxIntToInt8(v.AuxInt)
6396 if v_0.Op != OpAMD64ANDLconst {
6397 break
6398 }
6399 m := auxIntToInt32(v_0.AuxInt)
6400 if !(0 <= int8(m) && int8(m) < n) {
6401 break
6402 }
6403 v.reset(OpAMD64FlagLT_ULT)
6404 return true
6405 }
6406 // match: (CMPBconst a:(ANDL x y) [0])
6407 // cond: a.Uses == 1
6408 // result: (TESTB x y)
6409 for {
6410 if auxIntToInt8(v.AuxInt) != 0 {
6411 break
6412 }
6413 a := v_0
6414 if a.Op != OpAMD64ANDL {
6415 break
6416 }
6417 y := a.Args[1]
6418 x := a.Args[0]
6419 if !(a.Uses == 1) {
6420 break
6421 }
6422 v.reset(OpAMD64TESTB)
6423 v.AddArg2(x, y)
6424 return true
6425 }
6426 // match: (CMPBconst a:(ANDLconst [c] x) [0])
6427 // cond: a.Uses == 1
6428 // result: (TESTBconst [int8(c)] x)
6429 for {
6430 if auxIntToInt8(v.AuxInt) != 0 {
6431 break
6432 }
6433 a := v_0
6434 if a.Op != OpAMD64ANDLconst {
6435 break
6436 }
6437 c := auxIntToInt32(a.AuxInt)
6438 x := a.Args[0]
6439 if !(a.Uses == 1) {
6440 break
6441 }
6442 v.reset(OpAMD64TESTBconst)
6443 v.AuxInt = int8ToAuxInt(int8(c))
6444 v.AddArg(x)
6445 return true
6446 }
6447 // match: (CMPBconst x [0])
6448 // result: (TESTB x x)
6449 for {
6450 if auxIntToInt8(v.AuxInt) != 0 {
6451 break
6452 }
6453 x := v_0
6454 v.reset(OpAMD64TESTB)
6455 v.AddArg2(x, x)
6456 return true
6457 }
6458 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
6459 // cond: l.Uses == 1 && clobber(l)
6460 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
6461 for {
6462 c := auxIntToInt8(v.AuxInt)
6463 l := v_0
6464 if l.Op != OpAMD64MOVBload {
6465 break
6466 }
6467 off := auxIntToInt32(l.AuxInt)
6468 sym := auxToSym(l.Aux)
6469 mem := l.Args[1]
6470 ptr := l.Args[0]
6471 if !(l.Uses == 1 && clobber(l)) {
6472 break
6473 }
6474 b = l.Block
6475 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
6476 v.copyOf(v0)
6477 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
6478 v0.Aux = symToAux(sym)
6479 v0.AddArg2(ptr, mem)
6480 return true
6481 }
6482 return false
6483 }
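// Editorial note (not generated): CMP*constload ops carry the comparison
// constant and the memory offset in a single AuxInt via ValAndOff, which
// packs two 32-bit fields. The address-folding rules below therefore go
// through canAdd32/addOffset32 instead of adding offsets directly, so the
// packed offset cannot silently overflow.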
6484 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
6485 v_1 := v.Args[1]
6486 v_0 := v.Args[0]
6487 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
6488 // cond: ValAndOff(valoff1).canAdd32(off2)
6489 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
6490 for {
6491 valoff1 := auxIntToValAndOff(v.AuxInt)
6492 sym := auxToSym(v.Aux)
6493 if v_0.Op != OpAMD64ADDQconst {
6494 break
6495 }
6496 off2 := auxIntToInt32(v_0.AuxInt)
6497 base := v_0.Args[0]
6498 mem := v_1
6499 if !(ValAndOff(valoff1).canAdd32(off2)) {
6500 break
6501 }
6502 v.reset(OpAMD64CMPBconstload)
6503 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6504 v.Aux = symToAux(sym)
6505 v.AddArg2(base, mem)
6506 return true
6507 }
6508 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
6509 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
6510 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
6511 for {
6512 valoff1 := auxIntToValAndOff(v.AuxInt)
6513 sym1 := auxToSym(v.Aux)
6514 if v_0.Op != OpAMD64LEAQ {
6515 break
6516 }
6517 off2 := auxIntToInt32(v_0.AuxInt)
6518 sym2 := auxToSym(v_0.Aux)
6519 base := v_0.Args[0]
6520 mem := v_1
6521 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6522 break
6523 }
6524 v.reset(OpAMD64CMPBconstload)
6525 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6526 v.Aux = symToAux(mergeSym(sym1, sym2))
6527 v.AddArg2(base, mem)
6528 return true
6529 }
6530 return false
6531 }
6532 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
6533 v_2 := v.Args[2]
6534 v_1 := v.Args[1]
6535 v_0 := v.Args[0]
6536 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
6537 // cond: is32Bit(int64(off1)+int64(off2))
6538 // result: (CMPBload [off1+off2] {sym} base val mem)
6539 for {
6540 off1 := auxIntToInt32(v.AuxInt)
6541 sym := auxToSym(v.Aux)
6542 if v_0.Op != OpAMD64ADDQconst {
6543 break
6544 }
6545 off2 := auxIntToInt32(v_0.AuxInt)
6546 base := v_0.Args[0]
6547 val := v_1
6548 mem := v_2
6549 if !(is32Bit(int64(off1) + int64(off2))) {
6550 break
6551 }
6552 v.reset(OpAMD64CMPBload)
6553 v.AuxInt = int32ToAuxInt(off1 + off2)
6554 v.Aux = symToAux(sym)
6555 v.AddArg3(base, val, mem)
6556 return true
6557 }
6558 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
6559 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
6560 // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
6561 for {
6562 off1 := auxIntToInt32(v.AuxInt)
6563 sym1 := auxToSym(v.Aux)
6564 if v_0.Op != OpAMD64LEAQ {
6565 break
6566 }
6567 off2 := auxIntToInt32(v_0.AuxInt)
6568 sym2 := auxToSym(v_0.Aux)
6569 base := v_0.Args[0]
6570 val := v_1
6571 mem := v_2
6572 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6573 break
6574 }
6575 v.reset(OpAMD64CMPBload)
6576 v.AuxInt = int32ToAuxInt(off1 + off2)
6577 v.Aux = symToAux(mergeSym(sym1, sym2))
6578 v.AddArg3(base, val, mem)
6579 return true
6580 }
6581 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
6582 // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
6583 for {
6584 off := auxIntToInt32(v.AuxInt)
6585 sym := auxToSym(v.Aux)
6586 ptr := v_0
6587 if v_1.Op != OpAMD64MOVLconst {
6588 break
6589 }
6590 c := auxIntToInt32(v_1.AuxInt)
6591 mem := v_2
6592 v.reset(OpAMD64CMPBconstload)
6593 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
6594 v.Aux = symToAux(sym)
6595 v.AddArg2(ptr, mem)
6596 return true
6597 }
6598 return false
6599 }
6600 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
6601 v_1 := v.Args[1]
6602 v_0 := v.Args[0]
6603 b := v.Block
6604 // match: (CMPL x (MOVLconst [c]))
6605 // result: (CMPLconst x [c])
6606 for {
6607 x := v_0
6608 if v_1.Op != OpAMD64MOVLconst {
6609 break
6610 }
6611 c := auxIntToInt32(v_1.AuxInt)
6612 v.reset(OpAMD64CMPLconst)
6613 v.AuxInt = int32ToAuxInt(c)
6614 v.AddArg(x)
6615 return true
6616 }
6617 // match: (CMPL (MOVLconst [c]) x)
6618 // result: (InvertFlags (CMPLconst x [c]))
6619 for {
6620 if v_0.Op != OpAMD64MOVLconst {
6621 break
6622 }
6623 c := auxIntToInt32(v_0.AuxInt)
6624 x := v_1
6625 v.reset(OpAMD64InvertFlags)
6626 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
6627 v0.AuxInt = int32ToAuxInt(c)
6628 v0.AddArg(x)
6629 v.AddArg(v0)
6630 return true
6631 }
6632 // match: (CMPL x y)
6633 // cond: canonLessThan(x,y)
6634 // result: (InvertFlags (CMPL y x))
6635 for {
6636 x := v_0
6637 y := v_1
6638 if !(canonLessThan(x, y)) {
6639 break
6640 }
6641 v.reset(OpAMD64InvertFlags)
6642 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
6643 v0.AddArg2(y, x)
6644 v.AddArg(v0)
6645 return true
6646 }
6647 // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
6648 // cond: canMergeLoad(v, l) && clobber(l)
6649 // result: (CMPLload {sym} [off] ptr x mem)
6650 for {
6651 l := v_0
6652 if l.Op != OpAMD64MOVLload {
6653 break
6654 }
6655 off := auxIntToInt32(l.AuxInt)
6656 sym := auxToSym(l.Aux)
6657 mem := l.Args[1]
6658 ptr := l.Args[0]
6659 x := v_1
6660 if !(canMergeLoad(v, l) && clobber(l)) {
6661 break
6662 }
6663 v.reset(OpAMD64CMPLload)
6664 v.AuxInt = int32ToAuxInt(off)
6665 v.Aux = symToAux(sym)
6666 v.AddArg3(ptr, x, mem)
6667 return true
6668 }
6669 // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
6670 // cond: canMergeLoad(v, l) && clobber(l)
6671 // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
6672 for {
6673 x := v_0
6674 l := v_1
6675 if l.Op != OpAMD64MOVLload {
6676 break
6677 }
6678 off := auxIntToInt32(l.AuxInt)
6679 sym := auxToSym(l.Aux)
6680 mem := l.Args[1]
6681 ptr := l.Args[0]
6682 if !(canMergeLoad(v, l) && clobber(l)) {
6683 break
6684 }
6685 v.reset(OpAMD64InvertFlags)
6686 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
6687 v0.AuxInt = int32ToAuxInt(off)
6688 v0.Aux = symToAux(sym)
6689 v0.AddArg3(ptr, x, mem)
6690 v.AddArg(v0)
6691 return true
6692 }
6693 return false
6694 }
6695 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
6696 v_0 := v.Args[0]
6697 b := v.Block
6698 // match: (CMPLconst (MOVLconst [x]) [y])
6699 // cond: x==y
6700 // result: (FlagEQ)
6701 for {
6702 y := auxIntToInt32(v.AuxInt)
6703 if v_0.Op != OpAMD64MOVLconst {
6704 break
6705 }
6706 x := auxIntToInt32(v_0.AuxInt)
6707 if !(x == y) {
6708 break
6709 }
6710 v.reset(OpAMD64FlagEQ)
6711 return true
6712 }
6713 // match: (CMPLconst (MOVLconst [x]) [y])
6714 // cond: x<y && uint32(x)<uint32(y)
6715 // result: (FlagLT_ULT)
6716 for {
6717 y := auxIntToInt32(v.AuxInt)
6718 if v_0.Op != OpAMD64MOVLconst {
6719 break
6720 }
6721 x := auxIntToInt32(v_0.AuxInt)
6722 if !(x < y && uint32(x) < uint32(y)) {
6723 break
6724 }
6725 v.reset(OpAMD64FlagLT_ULT)
6726 return true
6727 }
6728 // match: (CMPLconst (MOVLconst [x]) [y])
6729 // cond: x<y && uint32(x)>uint32(y)
6730 // result: (FlagLT_UGT)
6731 for {
6732 y := auxIntToInt32(v.AuxInt)
6733 if v_0.Op != OpAMD64MOVLconst {
6734 break
6735 }
6736 x := auxIntToInt32(v_0.AuxInt)
6737 if !(x < y && uint32(x) > uint32(y)) {
6738 break
6739 }
6740 v.reset(OpAMD64FlagLT_UGT)
6741 return true
6742 }
6743 // match: (CMPLconst (MOVLconst [x]) [y])
6744 // cond: x>y && uint32(x)<uint32(y)
6745 // result: (FlagGT_ULT)
6746 for {
6747 y := auxIntToInt32(v.AuxInt)
6748 if v_0.Op != OpAMD64MOVLconst {
6749 break
6750 }
6751 x := auxIntToInt32(v_0.AuxInt)
6752 if !(x > y && uint32(x) < uint32(y)) {
6753 break
6754 }
6755 v.reset(OpAMD64FlagGT_ULT)
6756 return true
6757 }
6758 // match: (CMPLconst (MOVLconst [x]) [y])
6759 // cond: x>y && uint32(x)>uint32(y)
6760 // result: (FlagGT_UGT)
6761 for {
6762 y := auxIntToInt32(v.AuxInt)
6763 if v_0.Op != OpAMD64MOVLconst {
6764 break
6765 }
6766 x := auxIntToInt32(v_0.AuxInt)
6767 if !(x > y && uint32(x) > uint32(y)) {
6768 break
6769 }
6770 v.reset(OpAMD64FlagGT_UGT)
6771 return true
6772 }
6773 // match: (CMPLconst (SHRLconst _ [c]) [n])
6774 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
6775 // result: (FlagLT_ULT)
6776 for {
6777 n := auxIntToInt32(v.AuxInt)
6778 if v_0.Op != OpAMD64SHRLconst {
6779 break
6780 }
6781 c := auxIntToInt8(v_0.AuxInt)
6782 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
6783 break
6784 }
6785 v.reset(OpAMD64FlagLT_ULT)
6786 return true
6787 }
6788 // match: (CMPLconst (ANDLconst _ [m]) [n])
6789 // cond: 0 <= m && m < n
6790 // result: (FlagLT_ULT)
6791 for {
6792 n := auxIntToInt32(v.AuxInt)
6793 if v_0.Op != OpAMD64ANDLconst {
6794 break
6795 }
6796 m := auxIntToInt32(v_0.AuxInt)
6797 if !(0 <= m && m < n) {
6798 break
6799 }
6800 v.reset(OpAMD64FlagLT_ULT)
6801 return true
6802 }
6803 // match: (CMPLconst a:(ANDL x y) [0])
6804 // cond: a.Uses == 1
6805 // result: (TESTL x y)
6806 for {
6807 if auxIntToInt32(v.AuxInt) != 0 {
6808 break
6809 }
6810 a := v_0
6811 if a.Op != OpAMD64ANDL {
6812 break
6813 }
6814 y := a.Args[1]
6815 x := a.Args[0]
6816 if !(a.Uses == 1) {
6817 break
6818 }
6819 v.reset(OpAMD64TESTL)
6820 v.AddArg2(x, y)
6821 return true
6822 }
6823 // match: (CMPLconst a:(ANDLconst [c] x) [0])
6824 // cond: a.Uses == 1
6825 // result: (TESTLconst [c] x)
6826 for {
6827 if auxIntToInt32(v.AuxInt) != 0 {
6828 break
6829 }
6830 a := v_0
6831 if a.Op != OpAMD64ANDLconst {
6832 break
6833 }
6834 c := auxIntToInt32(a.AuxInt)
6835 x := a.Args[0]
6836 if !(a.Uses == 1) {
6837 break
6838 }
6839 v.reset(OpAMD64TESTLconst)
6840 v.AuxInt = int32ToAuxInt(c)
6841 v.AddArg(x)
6842 return true
6843 }
6844 // match: (CMPLconst x [0])
6845 // result: (TESTL x x)
6846 for {
6847 if auxIntToInt32(v.AuxInt) != 0 {
6848 break
6849 }
6850 x := v_0
6851 v.reset(OpAMD64TESTL)
6852 v.AddArg2(x, x)
6853 return true
6854 }
6855 // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
6856 // cond: l.Uses == 1 && clobber(l)
6857 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
6858 for {
6859 c := auxIntToInt32(v.AuxInt)
6860 l := v_0
6861 if l.Op != OpAMD64MOVLload {
6862 break
6863 }
6864 off := auxIntToInt32(l.AuxInt)
6865 sym := auxToSym(l.Aux)
6866 mem := l.Args[1]
6867 ptr := l.Args[0]
6868 if !(l.Uses == 1 && clobber(l)) {
6869 break
6870 }
6871 b = l.Block
6872 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
6873 v.copyOf(v0)
6874 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6875 v0.Aux = symToAux(sym)
6876 v0.AddArg2(ptr, mem)
6877 return true
6878 }
6879 return false
6880 }
6881 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
6882 v_1 := v.Args[1]
6883 v_0 := v.Args[0]
6884 // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
6885 // cond: ValAndOff(valoff1).canAdd32(off2)
6886 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
6887 for {
6888 valoff1 := auxIntToValAndOff(v.AuxInt)
6889 sym := auxToSym(v.Aux)
6890 if v_0.Op != OpAMD64ADDQconst {
6891 break
6892 }
6893 off2 := auxIntToInt32(v_0.AuxInt)
6894 base := v_0.Args[0]
6895 mem := v_1
6896 if !(ValAndOff(valoff1).canAdd32(off2)) {
6897 break
6898 }
6899 v.reset(OpAMD64CMPLconstload)
6900 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6901 v.Aux = symToAux(sym)
6902 v.AddArg2(base, mem)
6903 return true
6904 }
6905 // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
6906 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
6907 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
6908 for {
6909 valoff1 := auxIntToValAndOff(v.AuxInt)
6910 sym1 := auxToSym(v.Aux)
6911 if v_0.Op != OpAMD64LEAQ {
6912 break
6913 }
6914 off2 := auxIntToInt32(v_0.AuxInt)
6915 sym2 := auxToSym(v_0.Aux)
6916 base := v_0.Args[0]
6917 mem := v_1
6918 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6919 break
6920 }
6921 v.reset(OpAMD64CMPLconstload)
6922 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6923 v.Aux = symToAux(mergeSym(sym1, sym2))
6924 v.AddArg2(base, mem)
6925 return true
6926 }
6927 return false
6928 }
6929 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
6930 v_2 := v.Args[2]
6931 v_1 := v.Args[1]
6932 v_0 := v.Args[0]
6933 // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
6934 // cond: is32Bit(int64(off1)+int64(off2))
6935 // result: (CMPLload [off1+off2] {sym} base val mem)
6936 for {
6937 off1 := auxIntToInt32(v.AuxInt)
6938 sym := auxToSym(v.Aux)
6939 if v_0.Op != OpAMD64ADDQconst {
6940 break
6941 }
6942 off2 := auxIntToInt32(v_0.AuxInt)
6943 base := v_0.Args[0]
6944 val := v_1
6945 mem := v_2
6946 if !(is32Bit(int64(off1) + int64(off2))) {
6947 break
6948 }
6949 v.reset(OpAMD64CMPLload)
6950 v.AuxInt = int32ToAuxInt(off1 + off2)
6951 v.Aux = symToAux(sym)
6952 v.AddArg3(base, val, mem)
6953 return true
6954 }
6955 // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
6956 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
6957 // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
6958 for {
6959 off1 := auxIntToInt32(v.AuxInt)
6960 sym1 := auxToSym(v.Aux)
6961 if v_0.Op != OpAMD64LEAQ {
6962 break
6963 }
6964 off2 := auxIntToInt32(v_0.AuxInt)
6965 sym2 := auxToSym(v_0.Aux)
6966 base := v_0.Args[0]
6967 val := v_1
6968 mem := v_2
6969 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6970 break
6971 }
6972 v.reset(OpAMD64CMPLload)
6973 v.AuxInt = int32ToAuxInt(off1 + off2)
6974 v.Aux = symToAux(mergeSym(sym1, sym2))
6975 v.AddArg3(base, val, mem)
6976 return true
6977 }
6978 // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
6979 // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
6980 for {
6981 off := auxIntToInt32(v.AuxInt)
6982 sym := auxToSym(v.Aux)
6983 ptr := v_0
6984 if v_1.Op != OpAMD64MOVLconst {
6985 break
6986 }
6987 c := auxIntToInt32(v_1.AuxInt)
6988 mem := v_2
6989 v.reset(OpAMD64CMPLconstload)
6990 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6991 v.Aux = symToAux(sym)
6992 v.AddArg2(ptr, mem)
6993 return true
6994 }
6995 return false
6996 }
6997 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
6998 v_1 := v.Args[1]
6999 v_0 := v.Args[0]
7000 b := v.Block
7001 // match: (CMPQ x (MOVQconst [c]))
7002 // cond: is32Bit(c)
7003 // result: (CMPQconst x [int32(c)])
7004 for {
7005 x := v_0
7006 if v_1.Op != OpAMD64MOVQconst {
7007 break
7008 }
7009 c := auxIntToInt64(v_1.AuxInt)
7010 if !(is32Bit(c)) {
7011 break
7012 }
7013 v.reset(OpAMD64CMPQconst)
7014 v.AuxInt = int32ToAuxInt(int32(c))
7015 v.AddArg(x)
7016 return true
7017 }
7018 // match: (CMPQ (MOVQconst [c]) x)
7019 // cond: is32Bit(c)
7020 // result: (InvertFlags (CMPQconst x [int32(c)]))
7021 for {
7022 if v_0.Op != OpAMD64MOVQconst {
7023 break
7024 }
7025 c := auxIntToInt64(v_0.AuxInt)
7026 x := v_1
7027 if !(is32Bit(c)) {
7028 break
7029 }
7030 v.reset(OpAMD64InvertFlags)
7031 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7032 v0.AuxInt = int32ToAuxInt(int32(c))
7033 v0.AddArg(x)
7034 v.AddArg(v0)
7035 return true
7036 }
7037 // match: (CMPQ x y)
7038 // cond: canonLessThan(x,y)
7039 // result: (InvertFlags (CMPQ y x))
7040 for {
7041 x := v_0
7042 y := v_1
7043 if !(canonLessThan(x, y)) {
7044 break
7045 }
7046 v.reset(OpAMD64InvertFlags)
7047 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7048 v0.AddArg2(y, x)
7049 v.AddArg(v0)
7050 return true
7051 }
7052 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7053 // cond: x==y
7054 // result: (FlagEQ)
7055 for {
7056 if v_0.Op != OpAMD64MOVQconst {
7057 break
7058 }
7059 x := auxIntToInt64(v_0.AuxInt)
7060 if v_1.Op != OpAMD64MOVQconst {
7061 break
7062 }
7063 y := auxIntToInt64(v_1.AuxInt)
7064 if !(x == y) {
7065 break
7066 }
7067 v.reset(OpAMD64FlagEQ)
7068 return true
7069 }
7070 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7071 // cond: x<y && uint64(x)<uint64(y)
7072 // result: (FlagLT_ULT)
7073 for {
7074 if v_0.Op != OpAMD64MOVQconst {
7075 break
7076 }
7077 x := auxIntToInt64(v_0.AuxInt)
7078 if v_1.Op != OpAMD64MOVQconst {
7079 break
7080 }
7081 y := auxIntToInt64(v_1.AuxInt)
7082 if !(x < y && uint64(x) < uint64(y)) {
7083 break
7084 }
7085 v.reset(OpAMD64FlagLT_ULT)
7086 return true
7087 }
7088 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7089 // cond: x<y && uint64(x)>uint64(y)
7090 // result: (FlagLT_UGT)
7091 for {
7092 if v_0.Op != OpAMD64MOVQconst {
7093 break
7094 }
7095 x := auxIntToInt64(v_0.AuxInt)
7096 if v_1.Op != OpAMD64MOVQconst {
7097 break
7098 }
7099 y := auxIntToInt64(v_1.AuxInt)
7100 if !(x < y && uint64(x) > uint64(y)) {
7101 break
7102 }
7103 v.reset(OpAMD64FlagLT_UGT)
7104 return true
7105 }
7106 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7107 // cond: x>y && uint64(x)<uint64(y)
7108 // result: (FlagGT_ULT)
7109 for {
7110 if v_0.Op != OpAMD64MOVQconst {
7111 break
7112 }
7113 x := auxIntToInt64(v_0.AuxInt)
7114 if v_1.Op != OpAMD64MOVQconst {
7115 break
7116 }
7117 y := auxIntToInt64(v_1.AuxInt)
7118 if !(x > y && uint64(x) < uint64(y)) {
7119 break
7120 }
7121 v.reset(OpAMD64FlagGT_ULT)
7122 return true
7123 }
7124 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7125 // cond: x>y && uint64(x)>uint64(y)
7126 // result: (FlagGT_UGT)
7127 for {
7128 if v_0.Op != OpAMD64MOVQconst {
7129 break
7130 }
7131 x := auxIntToInt64(v_0.AuxInt)
7132 if v_1.Op != OpAMD64MOVQconst {
7133 break
7134 }
7135 y := auxIntToInt64(v_1.AuxInt)
7136 if !(x > y && uint64(x) > uint64(y)) {
7137 break
7138 }
7139 v.reset(OpAMD64FlagGT_UGT)
7140 return true
7141 }
7142 // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
7143 // cond: canMergeLoad(v, l) && clobber(l)
7144 // result: (CMPQload {sym} [off] ptr x mem)
7145 for {
7146 l := v_0
7147 if l.Op != OpAMD64MOVQload {
7148 break
7149 }
7150 off := auxIntToInt32(l.AuxInt)
7151 sym := auxToSym(l.Aux)
7152 mem := l.Args[1]
7153 ptr := l.Args[0]
7154 x := v_1
7155 if !(canMergeLoad(v, l) && clobber(l)) {
7156 break
7157 }
7158 v.reset(OpAMD64CMPQload)
7159 v.AuxInt = int32ToAuxInt(off)
7160 v.Aux = symToAux(sym)
7161 v.AddArg3(ptr, x, mem)
7162 return true
7163 }
7164 // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
7165 // cond: canMergeLoad(v, l) && clobber(l)
7166 // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
7167 for {
7168 x := v_0
7169 l := v_1
7170 if l.Op != OpAMD64MOVQload {
7171 break
7172 }
7173 off := auxIntToInt32(l.AuxInt)
7174 sym := auxToSym(l.Aux)
7175 mem := l.Args[1]
7176 ptr := l.Args[0]
7177 if !(canMergeLoad(v, l) && clobber(l)) {
7178 break
7179 }
7180 v.reset(OpAMD64InvertFlags)
7181 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7182 v0.AuxInt = int32ToAuxInt(off)
7183 v0.Aux = symToAux(sym)
7184 v0.AddArg3(ptr, x, mem)
7185 v.AddArg(v0)
7186 return true
7187 }
7188 return false
7189 }
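// Editorial note (not generated): unlike the 8-, 16- and 32-bit variants
// above, CMPQ folds a constant into CMPQconst only when is32Bit(c) holds,
// because x86-64 compare instructions accept at most a sign-extended 32-bit
// immediate; wider constants must stay in a register.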
7190 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7191 v_0 := v.Args[0]
7192 b := v.Block
7193 // match: (CMPQconst (MOVQconst [x]) [y])
7194 // cond: x==int64(y)
7195 // result: (FlagEQ)
7196 for {
7197 y := auxIntToInt32(v.AuxInt)
7198 if v_0.Op != OpAMD64MOVQconst {
7199 break
7200 }
7201 x := auxIntToInt64(v_0.AuxInt)
7202 if !(x == int64(y)) {
7203 break
7204 }
7205 v.reset(OpAMD64FlagEQ)
7206 return true
7207 }
7208 // match: (CMPQconst (MOVQconst [x]) [y])
7209 // cond: x<int64(y) && uint64(x)<uint64(int64(y))
7210 // result: (FlagLT_ULT)
7211 for {
7212 y := auxIntToInt32(v.AuxInt)
7213 if v_0.Op != OpAMD64MOVQconst {
7214 break
7215 }
7216 x := auxIntToInt64(v_0.AuxInt)
7217 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7218 break
7219 }
7220 v.reset(OpAMD64FlagLT_ULT)
7221 return true
7222 }
7223 // match: (CMPQconst (MOVQconst [x]) [y])
7224 // cond: x<int64(y) && uint64(x)>uint64(int64(y))
7225 // result: (FlagLT_UGT)
7226 for {
7227 y := auxIntToInt32(v.AuxInt)
7228 if v_0.Op != OpAMD64MOVQconst {
7229 break
7230 }
7231 x := auxIntToInt64(v_0.AuxInt)
7232 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7233 break
7234 }
7235 v.reset(OpAMD64FlagLT_UGT)
7236 return true
7237 }
7238 // match: (CMPQconst (MOVQconst [x]) [y])
7239 // cond: x>int64(y) && uint64(x)<uint64(int64(y))
7240 // result: (FlagGT_ULT)
7241 for {
7242 y := auxIntToInt32(v.AuxInt)
7243 if v_0.Op != OpAMD64MOVQconst {
7244 break
7245 }
7246 x := auxIntToInt64(v_0.AuxInt)
7247 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7248 break
7249 }
7250 v.reset(OpAMD64FlagGT_ULT)
7251 return true
7252 }
7253 // match: (CMPQconst (MOVQconst [x]) [y])
7254 // cond: x>int64(y) && uint64(x)>uint64(int64(y))
7255 // result: (FlagGT_UGT)
7256 for {
7257 y := auxIntToInt32(v.AuxInt)
7258 if v_0.Op != OpAMD64MOVQconst {
7259 break
7260 }
7261 x := auxIntToInt64(v_0.AuxInt)
7262 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7263 break
7264 }
7265 v.reset(OpAMD64FlagGT_UGT)
7266 return true
7267 }
7268 // match: (CMPQconst (MOVBQZX _) [c])
7269 // cond: 0xFF < c
7270 // result: (FlagLT_ULT)
7271 for {
7272 c := auxIntToInt32(v.AuxInt)
7273 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7274 break
7275 }
7276 v.reset(OpAMD64FlagLT_ULT)
7277 return true
7278 }
7279 // match: (CMPQconst (MOVWQZX _) [c])
7280 // cond: 0xFFFF < c
7281 // result: (FlagLT_ULT)
7282 for {
7283 c := auxIntToInt32(v.AuxInt)
7284 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7285 break
7286 }
7287 v.reset(OpAMD64FlagLT_ULT)
7288 return true
7289 }
7290 // match: (CMPQconst (SHRQconst _ [c]) [n])
7291 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
7292 // result: (FlagLT_ULT)
7293 for {
7294 n := auxIntToInt32(v.AuxInt)
7295 if v_0.Op != OpAMD64SHRQconst {
7296 break
7297 }
7298 c := auxIntToInt8(v_0.AuxInt)
7299 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7300 break
7301 }
7302 v.reset(OpAMD64FlagLT_ULT)
7303 return true
7304 }
7305 // match: (CMPQconst (ANDQconst _ [m]) [n])
7306 // cond: 0 <= m && m < n
7307 // result: (FlagLT_ULT)
7308 for {
7309 n := auxIntToInt32(v.AuxInt)
7310 if v_0.Op != OpAMD64ANDQconst {
7311 break
7312 }
7313 m := auxIntToInt32(v_0.AuxInt)
7314 if !(0 <= m && m < n) {
7315 break
7316 }
7317 v.reset(OpAMD64FlagLT_ULT)
7318 return true
7319 }
7320 // match: (CMPQconst (ANDLconst _ [m]) [n])
7321 // cond: 0 <= m && m < n
7322 // result: (FlagLT_ULT)
7323 for {
7324 n := auxIntToInt32(v.AuxInt)
7325 if v_0.Op != OpAMD64ANDLconst {
7326 break
7327 }
7328 m := auxIntToInt32(v_0.AuxInt)
7329 if !(0 <= m && m < n) {
7330 break
7331 }
7332 v.reset(OpAMD64FlagLT_ULT)
7333 return true
7334 }
7335 // match: (CMPQconst a:(ANDQ x y) [0])
7336 // cond: a.Uses == 1
7337 // result: (TESTQ x y)
7338 for {
7339 if auxIntToInt32(v.AuxInt) != 0 {
7340 break
7341 }
7342 a := v_0
7343 if a.Op != OpAMD64ANDQ {
7344 break
7345 }
7346 y := a.Args[1]
7347 x := a.Args[0]
7348 if !(a.Uses == 1) {
7349 break
7350 }
7351 v.reset(OpAMD64TESTQ)
7352 v.AddArg2(x, y)
7353 return true
7354 }
7355 // match: (CMPQconst a:(ANDQconst [c] x) [0])
7356 // cond: a.Uses == 1
7357 // result: (TESTQconst [c] x)
7358 for {
7359 if auxIntToInt32(v.AuxInt) != 0 {
7360 break
7361 }
7362 a := v_0
7363 if a.Op != OpAMD64ANDQconst {
7364 break
7365 }
7366 c := auxIntToInt32(a.AuxInt)
7367 x := a.Args[0]
7368 if !(a.Uses == 1) {
7369 break
7370 }
7371 v.reset(OpAMD64TESTQconst)
7372 v.AuxInt = int32ToAuxInt(c)
7373 v.AddArg(x)
7374 return true
7375 }
7376 // match: (CMPQconst x [0])
7377 // result: (TESTQ x x)
7378 for {
7379 if auxIntToInt32(v.AuxInt) != 0 {
7380 break
7381 }
7382 x := v_0
7383 v.reset(OpAMD64TESTQ)
7384 v.AddArg2(x, x)
7385 return true
7386 }
7387 // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
7388 // cond: l.Uses == 1 && clobber(l)
7389 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
7390 for {
7391 c := auxIntToInt32(v.AuxInt)
7392 l := v_0
7393 if l.Op != OpAMD64MOVQload {
7394 break
7395 }
7396 off := auxIntToInt32(l.AuxInt)
7397 sym := auxToSym(l.Aux)
7398 mem := l.Args[1]
7399 ptr := l.Args[0]
7400 if !(l.Uses == 1 && clobber(l)) {
7401 break
7402 }
7403 b = l.Block
7404 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7405 v.copyOf(v0)
7406 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7407 v0.Aux = symToAux(sym)
7408 v0.AddArg2(ptr, mem)
7409 return true
7410 }
7411 return false
7412 }
7413 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7414 v_1 := v.Args[1]
7415 v_0 := v.Args[0]
7416 // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7417 // cond: ValAndOff(valoff1).canAdd32(off2)
7418 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7419 for {
7420 valoff1 := auxIntToValAndOff(v.AuxInt)
7421 sym := auxToSym(v.Aux)
7422 if v_0.Op != OpAMD64ADDQconst {
7423 break
7424 }
7425 off2 := auxIntToInt32(v_0.AuxInt)
7426 base := v_0.Args[0]
7427 mem := v_1
7428 if !(ValAndOff(valoff1).canAdd32(off2)) {
7429 break
7430 }
7431 v.reset(OpAMD64CMPQconstload)
7432 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7433 v.Aux = symToAux(sym)
7434 v.AddArg2(base, mem)
7435 return true
7436 }
7437 // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7438 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7439 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7440 for {
7441 valoff1 := auxIntToValAndOff(v.AuxInt)
7442 sym1 := auxToSym(v.Aux)
7443 if v_0.Op != OpAMD64LEAQ {
7444 break
7445 }
7446 off2 := auxIntToInt32(v_0.AuxInt)
7447 sym2 := auxToSym(v_0.Aux)
7448 base := v_0.Args[0]
7449 mem := v_1
7450 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7451 break
7452 }
7453 v.reset(OpAMD64CMPQconstload)
7454 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7455 v.Aux = symToAux(mergeSym(sym1, sym2))
7456 v.AddArg2(base, mem)
7457 return true
7458 }
7459 return false
7460 }
7461 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7462 v_2 := v.Args[2]
7463 v_1 := v.Args[1]
7464 v_0 := v.Args[0]
7465 // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
7466 // cond: is32Bit(int64(off1)+int64(off2))
7467 // result: (CMPQload [off1+off2] {sym} base val mem)
7468 for {
7469 off1 := auxIntToInt32(v.AuxInt)
7470 sym := auxToSym(v.Aux)
7471 if v_0.Op != OpAMD64ADDQconst {
7472 break
7473 }
7474 off2 := auxIntToInt32(v_0.AuxInt)
7475 base := v_0.Args[0]
7476 val := v_1
7477 mem := v_2
7478 if !(is32Bit(int64(off1) + int64(off2))) {
7479 break
7480 }
7481 v.reset(OpAMD64CMPQload)
7482 v.AuxInt = int32ToAuxInt(off1 + off2)
7483 v.Aux = symToAux(sym)
7484 v.AddArg3(base, val, mem)
7485 return true
7486 }
7487 // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7488 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7489 // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7490 for {
7491 off1 := auxIntToInt32(v.AuxInt)
7492 sym1 := auxToSym(v.Aux)
7493 if v_0.Op != OpAMD64LEAQ {
7494 break
7495 }
7496 off2 := auxIntToInt32(v_0.AuxInt)
7497 sym2 := auxToSym(v_0.Aux)
7498 base := v_0.Args[0]
7499 val := v_1
7500 mem := v_2
7501 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7502 break
7503 }
7504 v.reset(OpAMD64CMPQload)
7505 v.AuxInt = int32ToAuxInt(off1 + off2)
7506 v.Aux = symToAux(mergeSym(sym1, sym2))
7507 v.AddArg3(base, val, mem)
7508 return true
7509 }
7510 // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
7511 // cond: validVal(c)
7512 // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7513 for {
7514 off := auxIntToInt32(v.AuxInt)
7515 sym := auxToSym(v.Aux)
7516 ptr := v_0
7517 if v_1.Op != OpAMD64MOVQconst {
7518 break
7519 }
7520 c := auxIntToInt64(v_1.AuxInt)
7521 mem := v_2
7522 if !(validVal(c)) {
7523 break
7524 }
7525 v.reset(OpAMD64CMPQconstload)
7526 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7527 v.Aux = symToAux(sym)
7528 v.AddArg2(ptr, mem)
7529 return true
7530 }
7531 return false
7532 }
7533 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
7534 v_1 := v.Args[1]
7535 v_0 := v.Args[0]
7536 b := v.Block
7537 // match: (CMPW x (MOVLconst [c]))
7538 // result: (CMPWconst x [int16(c)])
7539 for {
7540 x := v_0
7541 if v_1.Op != OpAMD64MOVLconst {
7542 break
7543 }
7544 c := auxIntToInt32(v_1.AuxInt)
7545 v.reset(OpAMD64CMPWconst)
7546 v.AuxInt = int16ToAuxInt(int16(c))
7547 v.AddArg(x)
7548 return true
7549 }
7550 // match: (CMPW (MOVLconst [c]) x)
7551 // result: (InvertFlags (CMPWconst x [int16(c)]))
7552 for {
7553 if v_0.Op != OpAMD64MOVLconst {
7554 break
7555 }
7556 c := auxIntToInt32(v_0.AuxInt)
7557 x := v_1
7558 v.reset(OpAMD64InvertFlags)
7559 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
7560 v0.AuxInt = int16ToAuxInt(int16(c))
7561 v0.AddArg(x)
7562 v.AddArg(v0)
7563 return true
7564 }
7565 // match: (CMPW x y)
7566 // cond: canonLessThan(x,y)
7567 // result: (InvertFlags (CMPW y x))
7568 for {
7569 x := v_0
7570 y := v_1
7571 if !(canonLessThan(x, y)) {
7572 break
7573 }
7574 v.reset(OpAMD64InvertFlags)
7575 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
7576 v0.AddArg2(y, x)
7577 v.AddArg(v0)
7578 return true
7579 }
7580 // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
7581 // cond: canMergeLoad(v, l) && clobber(l)
7582 // result: (CMPWload {sym} [off] ptr x mem)
7583 for {
7584 l := v_0
7585 if l.Op != OpAMD64MOVWload {
7586 break
7587 }
7588 off := auxIntToInt32(l.AuxInt)
7589 sym := auxToSym(l.Aux)
7590 mem := l.Args[1]
7591 ptr := l.Args[0]
7592 x := v_1
7593 if !(canMergeLoad(v, l) && clobber(l)) {
7594 break
7595 }
7596 v.reset(OpAMD64CMPWload)
7597 v.AuxInt = int32ToAuxInt(off)
7598 v.Aux = symToAux(sym)
7599 v.AddArg3(ptr, x, mem)
7600 return true
7601 }
7602 // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
7603 // cond: canMergeLoad(v, l) && clobber(l)
7604 // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
7605 for {
7606 x := v_0
7607 l := v_1
7608 if l.Op != OpAMD64MOVWload {
7609 break
7610 }
7611 off := auxIntToInt32(l.AuxInt)
7612 sym := auxToSym(l.Aux)
7613 mem := l.Args[1]
7614 ptr := l.Args[0]
7615 if !(canMergeLoad(v, l) && clobber(l)) {
7616 break
7617 }
7618 v.reset(OpAMD64InvertFlags)
7619 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
7620 v0.AuxInt = int32ToAuxInt(off)
7621 v0.Aux = symToAux(sym)
7622 v0.AddArg3(ptr, x, mem)
7623 v.AddArg(v0)
7624 return true
7625 }
7626 return false
7627 }
7628 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
7629 v_0 := v.Args[0]
7630 b := v.Block
7631 // match: (CMPWconst (MOVLconst [x]) [y])
7632 // cond: int16(x)==y
7633 // result: (FlagEQ)
7634 for {
7635 y := auxIntToInt16(v.AuxInt)
7636 if v_0.Op != OpAMD64MOVLconst {
7637 break
7638 }
7639 x := auxIntToInt32(v_0.AuxInt)
7640 if !(int16(x) == y) {
7641 break
7642 }
7643 v.reset(OpAMD64FlagEQ)
7644 return true
7645 }
7646 // match: (CMPWconst (MOVLconst [x]) [y])
7647 // cond: int16(x)<y && uint16(x)<uint16(y)
7648 // result: (FlagLT_ULT)
7649 for {
7650 y := auxIntToInt16(v.AuxInt)
7651 if v_0.Op != OpAMD64MOVLconst {
7652 break
7653 }
7654 x := auxIntToInt32(v_0.AuxInt)
7655 if !(int16(x) < y && uint16(x) < uint16(y)) {
7656 break
7657 }
7658 v.reset(OpAMD64FlagLT_ULT)
7659 return true
7660 }
7661 // match: (CMPWconst (MOVLconst [x]) [y])
7662 // cond: int16(x)<y && uint16(x)>uint16(y)
7663 // result: (FlagLT_UGT)
7664 for {
7665 y := auxIntToInt16(v.AuxInt)
7666 if v_0.Op != OpAMD64MOVLconst {
7667 break
7668 }
7669 x := auxIntToInt32(v_0.AuxInt)
7670 if !(int16(x) < y && uint16(x) > uint16(y)) {
7671 break
7672 }
7673 v.reset(OpAMD64FlagLT_UGT)
7674 return true
7675 }
7676 // match: (CMPWconst (MOVLconst [x]) [y])
7677 // cond: int16(x)>y && uint16(x)<uint16(y)
7678 // result: (FlagGT_ULT)
7679 for {
7680 y := auxIntToInt16(v.AuxInt)
7681 if v_0.Op != OpAMD64MOVLconst {
7682 break
7683 }
7684 x := auxIntToInt32(v_0.AuxInt)
7685 if !(int16(x) > y && uint16(x) < uint16(y)) {
7686 break
7687 }
7688 v.reset(OpAMD64FlagGT_ULT)
7689 return true
7690 }
7691 // match: (CMPWconst (MOVLconst [x]) [y])
7692 // cond: int16(x)>y && uint16(x)>uint16(y)
7693 // result: (FlagGT_UGT)
7694 for {
7695 y := auxIntToInt16(v.AuxInt)
7696 if v_0.Op != OpAMD64MOVLconst {
7697 break
7698 }
7699 x := auxIntToInt32(v_0.AuxInt)
7700 if !(int16(x) > y && uint16(x) > uint16(y)) {
7701 break
7702 }
7703 v.reset(OpAMD64FlagGT_UGT)
7704 return true
7705 }
7706 // match: (CMPWconst (ANDLconst _ [m]) [n])
7707 // cond: 0 <= int16(m) && int16(m) < n
7708 // result: (FlagLT_ULT)
7709 for {
7710 n := auxIntToInt16(v.AuxInt)
7711 if v_0.Op != OpAMD64ANDLconst {
7712 break
7713 }
7714 m := auxIntToInt32(v_0.AuxInt)
7715 if !(0 <= int16(m) && int16(m) < n) {
7716 break
7717 }
7718 v.reset(OpAMD64FlagLT_ULT)
7719 return true
7720 }
7721 // match: (CMPWconst a:(ANDL x y) [0])
7722 // cond: a.Uses == 1
7723 // result: (TESTW x y)
7724 for {
7725 if auxIntToInt16(v.AuxInt) != 0 {
7726 break
7727 }
7728 a := v_0
7729 if a.Op != OpAMD64ANDL {
7730 break
7731 }
7732 y := a.Args[1]
7733 x := a.Args[0]
7734 if !(a.Uses == 1) {
7735 break
7736 }
7737 v.reset(OpAMD64TESTW)
7738 v.AddArg2(x, y)
7739 return true
7740 }
7741 // match: (CMPWconst a:(ANDLconst [c] x) [0])
7742 // cond: a.Uses == 1
7743 // result: (TESTWconst [int16(c)] x)
7744 for {
7745 if auxIntToInt16(v.AuxInt) != 0 {
7746 break
7747 }
7748 a := v_0
7749 if a.Op != OpAMD64ANDLconst {
7750 break
7751 }
7752 c := auxIntToInt32(a.AuxInt)
7753 x := a.Args[0]
7754 if !(a.Uses == 1) {
7755 break
7756 }
7757 v.reset(OpAMD64TESTWconst)
7758 v.AuxInt = int16ToAuxInt(int16(c))
7759 v.AddArg(x)
7760 return true
7761 }
7762 // match: (CMPWconst x [0])
7763 // result: (TESTW x x)
7764 for {
7765 if auxIntToInt16(v.AuxInt) != 0 {
7766 break
7767 }
7768 x := v_0
7769 v.reset(OpAMD64TESTW)
7770 v.AddArg2(x, x)
7771 return true
7772 }
7773 // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
7774 // cond: l.Uses == 1 && clobber(l)
7775 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7776 for {
7777 c := auxIntToInt16(v.AuxInt)
7778 l := v_0
7779 if l.Op != OpAMD64MOVWload {
7780 break
7781 }
7782 off := auxIntToInt32(l.AuxInt)
7783 sym := auxToSym(l.Aux)
7784 mem := l.Args[1]
7785 ptr := l.Args[0]
7786 if !(l.Uses == 1 && clobber(l)) {
7787 break
7788 }
7789 b = l.Block
7790 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
7791 v.copyOf(v0)
7792 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7793 v0.Aux = symToAux(sym)
7794 v0.AddArg2(ptr, mem)
7795 return true
7796 }
7797 return false
7798 }
7799 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
7800 v_1 := v.Args[1]
7801 v_0 := v.Args[0]
7802 // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7803 // cond: ValAndOff(valoff1).canAdd32(off2)
7804 // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7805 for {
7806 valoff1 := auxIntToValAndOff(v.AuxInt)
7807 sym := auxToSym(v.Aux)
7808 if v_0.Op != OpAMD64ADDQconst {
7809 break
7810 }
7811 off2 := auxIntToInt32(v_0.AuxInt)
7812 base := v_0.Args[0]
7813 mem := v_1
7814 if !(ValAndOff(valoff1).canAdd32(off2)) {
7815 break
7816 }
7817 v.reset(OpAMD64CMPWconstload)
7818 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7819 v.Aux = symToAux(sym)
7820 v.AddArg2(base, mem)
7821 return true
7822 }
7823 // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7824 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7825 // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7826 for {
7827 valoff1 := auxIntToValAndOff(v.AuxInt)
7828 sym1 := auxToSym(v.Aux)
7829 if v_0.Op != OpAMD64LEAQ {
7830 break
7831 }
7832 off2 := auxIntToInt32(v_0.AuxInt)
7833 sym2 := auxToSym(v_0.Aux)
7834 base := v_0.Args[0]
7835 mem := v_1
7836 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7837 break
7838 }
7839 v.reset(OpAMD64CMPWconstload)
7840 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7841 v.Aux = symToAux(mergeSym(sym1, sym2))
7842 v.AddArg2(base, mem)
7843 return true
7844 }
7845 return false
7846 }
7847 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
7848 v_2 := v.Args[2]
7849 v_1 := v.Args[1]
7850 v_0 := v.Args[0]
7851 // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
7852 // cond: is32Bit(int64(off1)+int64(off2))
7853 // result: (CMPWload [off1+off2] {sym} base val mem)
7854 for {
7855 off1 := auxIntToInt32(v.AuxInt)
7856 sym := auxToSym(v.Aux)
7857 if v_0.Op != OpAMD64ADDQconst {
7858 break
7859 }
7860 off2 := auxIntToInt32(v_0.AuxInt)
7861 base := v_0.Args[0]
7862 val := v_1
7863 mem := v_2
7864 if !(is32Bit(int64(off1) + int64(off2))) {
7865 break
7866 }
7867 v.reset(OpAMD64CMPWload)
7868 v.AuxInt = int32ToAuxInt(off1 + off2)
7869 v.Aux = symToAux(sym)
7870 v.AddArg3(base, val, mem)
7871 return true
7872 }
7873 // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7874 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7875 // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7876 for {
7877 off1 := auxIntToInt32(v.AuxInt)
7878 sym1 := auxToSym(v.Aux)
7879 if v_0.Op != OpAMD64LEAQ {
7880 break
7881 }
7882 off2 := auxIntToInt32(v_0.AuxInt)
7883 sym2 := auxToSym(v_0.Aux)
7884 base := v_0.Args[0]
7885 val := v_1
7886 mem := v_2
7887 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7888 break
7889 }
7890 v.reset(OpAMD64CMPWload)
7891 v.AuxInt = int32ToAuxInt(off1 + off2)
7892 v.Aux = symToAux(mergeSym(sym1, sym2))
7893 v.AddArg3(base, val, mem)
7894 return true
7895 }
7896 // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
7897 // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
7898 for {
7899 off := auxIntToInt32(v.AuxInt)
7900 sym := auxToSym(v.Aux)
7901 ptr := v_0
7902 if v_1.Op != OpAMD64MOVLconst {
7903 break
7904 }
7905 c := auxIntToInt32(v_1.AuxInt)
7906 mem := v_2
7907 v.reset(OpAMD64CMPWconstload)
7908 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
7909 v.Aux = symToAux(sym)
7910 v.AddArg2(ptr, mem)
7911 return true
7912 }
7913 return false
7914 }
7915 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
7916 v_3 := v.Args[3]
7917 v_2 := v.Args[2]
7918 v_1 := v.Args[1]
7919 v_0 := v.Args[0]
7920 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
7921 // cond: is32Bit(int64(off1)+int64(off2))
7922 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
7923 for {
7924 off1 := auxIntToInt32(v.AuxInt)
7925 sym := auxToSym(v.Aux)
7926 if v_0.Op != OpAMD64ADDQconst {
7927 break
7928 }
7929 off2 := auxIntToInt32(v_0.AuxInt)
7930 ptr := v_0.Args[0]
7931 old := v_1
7932 new_ := v_2
7933 mem := v_3
7934 if !(is32Bit(int64(off1) + int64(off2))) {
7935 break
7936 }
7937 v.reset(OpAMD64CMPXCHGLlock)
7938 v.AuxInt = int32ToAuxInt(off1 + off2)
7939 v.Aux = symToAux(sym)
7940 v.AddArg4(ptr, old, new_, mem)
7941 return true
7942 }
7943 return false
7944 }
7945 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
7946 v_3 := v.Args[3]
7947 v_2 := v.Args[2]
7948 v_1 := v.Args[1]
7949 v_0 := v.Args[0]
7950 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
7951 // cond: is32Bit(int64(off1)+int64(off2))
7952 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
7953 for {
7954 off1 := auxIntToInt32(v.AuxInt)
7955 sym := auxToSym(v.Aux)
7956 if v_0.Op != OpAMD64ADDQconst {
7957 break
7958 }
7959 off2 := auxIntToInt32(v_0.AuxInt)
7960 ptr := v_0.Args[0]
7961 old := v_1
7962 new_ := v_2
7963 mem := v_3
7964 if !(is32Bit(int64(off1) + int64(off2))) {
7965 break
7966 }
7967 v.reset(OpAMD64CMPXCHGQlock)
7968 v.AuxInt = int32ToAuxInt(off1 + off2)
7969 v.Aux = symToAux(sym)
7970 v.AddArg4(ptr, old, new_, mem)
7971 return true
7972 }
7973 return false
7974 }
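// Editorial note (not generated): floating-point division is not
// commutative, so only a load feeding the divisor (the second operand) can
// be merged into DIVSD/DIVSS below. canMergeLoadClobber additionally
// requires that the other operand x is safe to clobber, since the fused
// instruction overwrites its first argument with the result.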
7975 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
7976 v_1 := v.Args[1]
7977 v_0 := v.Args[0]
7978 // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
7979 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
7980 // result: (DIVSDload x [off] {sym} ptr mem)
7981 for {
7982 x := v_0
7983 l := v_1
7984 if l.Op != OpAMD64MOVSDload {
7985 break
7986 }
7987 off := auxIntToInt32(l.AuxInt)
7988 sym := auxToSym(l.Aux)
7989 mem := l.Args[1]
7990 ptr := l.Args[0]
7991 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
7992 break
7993 }
7994 v.reset(OpAMD64DIVSDload)
7995 v.AuxInt = int32ToAuxInt(off)
7996 v.Aux = symToAux(sym)
7997 v.AddArg3(x, ptr, mem)
7998 return true
7999 }
8000 return false
8001 }
8002 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8003 v_2 := v.Args[2]
8004 v_1 := v.Args[1]
8005 v_0 := v.Args[0]
8006 // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
8007 // cond: is32Bit(int64(off1)+int64(off2))
8008 // result: (DIVSDload [off1+off2] {sym} val base mem)
8009 for {
8010 off1 := auxIntToInt32(v.AuxInt)
8011 sym := auxToSym(v.Aux)
8012 val := v_0
8013 if v_1.Op != OpAMD64ADDQconst {
8014 break
8015 }
8016 off2 := auxIntToInt32(v_1.AuxInt)
8017 base := v_1.Args[0]
8018 mem := v_2
8019 if !(is32Bit(int64(off1) + int64(off2))) {
8020 break
8021 }
8022 v.reset(OpAMD64DIVSDload)
8023 v.AuxInt = int32ToAuxInt(off1 + off2)
8024 v.Aux = symToAux(sym)
8025 v.AddArg3(val, base, mem)
8026 return true
8027 }
8028 // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8029 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8030 // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8031 for {
8032 off1 := auxIntToInt32(v.AuxInt)
8033 sym1 := auxToSym(v.Aux)
8034 val := v_0
8035 if v_1.Op != OpAMD64LEAQ {
8036 break
8037 }
8038 off2 := auxIntToInt32(v_1.AuxInt)
8039 sym2 := auxToSym(v_1.Aux)
8040 base := v_1.Args[0]
8041 mem := v_2
8042 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8043 break
8044 }
8045 v.reset(OpAMD64DIVSDload)
8046 v.AuxInt = int32ToAuxInt(off1 + off2)
8047 v.Aux = symToAux(mergeSym(sym1, sym2))
8048 v.AddArg3(val, base, mem)
8049 return true
8050 }
8051 return false
8052 }
8053 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8054 v_1 := v.Args[1]
8055 v_0 := v.Args[0]
8056 // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
8057 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
8058 // result: (DIVSSload x [off] {sym} ptr mem)
8059 for {
8060 x := v_0
8061 l := v_1
8062 if l.Op != OpAMD64MOVSSload {
8063 break
8064 }
8065 off := auxIntToInt32(l.AuxInt)
8066 sym := auxToSym(l.Aux)
8067 mem := l.Args[1]
8068 ptr := l.Args[0]
8069 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8070 break
8071 }
8072 v.reset(OpAMD64DIVSSload)
8073 v.AuxInt = int32ToAuxInt(off)
8074 v.Aux = symToAux(sym)
8075 v.AddArg3(x, ptr, mem)
8076 return true
8077 }
8078 return false
8079 }
8080 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8081 v_2 := v.Args[2]
8082 v_1 := v.Args[1]
8083 v_0 := v.Args[0]
8084 // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
8085 // cond: is32Bit(int64(off1)+int64(off2))
8086 // result: (DIVSSload [off1+off2] {sym} val base mem)
8087 for {
8088 off1 := auxIntToInt32(v.AuxInt)
8089 sym := auxToSym(v.Aux)
8090 val := v_0
8091 if v_1.Op != OpAMD64ADDQconst {
8092 break
8093 }
8094 off2 := auxIntToInt32(v_1.AuxInt)
8095 base := v_1.Args[0]
8096 mem := v_2
8097 if !(is32Bit(int64(off1) + int64(off2))) {
8098 break
8099 }
8100 v.reset(OpAMD64DIVSSload)
8101 v.AuxInt = int32ToAuxInt(off1 + off2)
8102 v.Aux = symToAux(sym)
8103 v.AddArg3(val, base, mem)
8104 return true
8105 }
8106 // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8107 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8108 // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8109 for {
8110 off1 := auxIntToInt32(v.AuxInt)
8111 sym1 := auxToSym(v.Aux)
8112 val := v_0
8113 if v_1.Op != OpAMD64LEAQ {
8114 break
8115 }
8116 off2 := auxIntToInt32(v_1.AuxInt)
8117 sym2 := auxToSym(v_1.Aux)
8118 base := v_1.Args[0]
8119 mem := v_2
8120 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8121 break
8122 }
8123 v.reset(OpAMD64DIVSSload)
8124 v.AuxInt = int32ToAuxInt(off1 + off2)
8125 v.Aux = symToAux(mergeSym(sym1, sym2))
8126 v.AddArg3(val, base, mem)
8127 return true
8128 }
8129 return false
8130 }
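// Editorial note (not generated): the HMUL* rules below only commute
// operands. When exactly one operand is rematerializable (cheap to
// regenerate, e.g. a constant), putting it first is presumably intended to
// help the register allocator satisfy the instruction's fixed-register
// operand constraints; this rationale is an editorial inference, not part of
// the generated source.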
8131 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8132 v_1 := v.Args[1]
8133 v_0 := v.Args[0]
8134 // match: (HMULL x y)
8135 // cond: !x.rematerializeable() && y.rematerializeable()
8136 // result: (HMULL y x)
8137 for {
8138 x := v_0
8139 y := v_1
8140 if !(!x.rematerializeable() && y.rematerializeable()) {
8141 break
8142 }
8143 v.reset(OpAMD64HMULL)
8144 v.AddArg2(y, x)
8145 return true
8146 }
8147 return false
8148 }
8149 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8150 v_1 := v.Args[1]
8151 v_0 := v.Args[0]
8152 // match: (HMULLU x y)
8153 // cond: !x.rematerializeable() && y.rematerializeable()
8154 // result: (HMULLU y x)
8155 for {
8156 x := v_0
8157 y := v_1
8158 if !(!x.rematerializeable() && y.rematerializeable()) {
8159 break
8160 }
8161 v.reset(OpAMD64HMULLU)
8162 v.AddArg2(y, x)
8163 return true
8164 }
8165 return false
8166 }
8167 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8168 v_1 := v.Args[1]
8169 v_0 := v.Args[0]
8170 // match: (HMULQ x y)
8171 // cond: !x.rematerializeable() && y.rematerializeable()
8172 // result: (HMULQ y x)
8173 for {
8174 x := v_0
8175 y := v_1
8176 if !(!x.rematerializeable() && y.rematerializeable()) {
8177 break
8178 }
8179 v.reset(OpAMD64HMULQ)
8180 v.AddArg2(y, x)
8181 return true
8182 }
8183 return false
8184 }
8185 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8186 v_1 := v.Args[1]
8187 v_0 := v.Args[0]
8188 // match: (HMULQU x y)
8189 // cond: !x.rematerializeable() && y.rematerializeable()
8190 // result: (HMULQU y x)
8191 for {
8192 x := v_0
8193 y := v_1
8194 if !(!x.rematerializeable() && y.rematerializeable()) {
8195 break
8196 }
8197 v.reset(OpAMD64HMULQU)
8198 v.AddArg2(y, x)
8199 return true
8200 }
8201 return false
8202 }
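// Editorial note (not generated): the LEAL* rules below map onto the x86
// addressing mode base + index*scale + 32-bit displacement. An ADDLconst
// folds into the displacement, guarded by is32Bit on the sum; when the
// constant sits under the scaled index it contributes scale*d (2*d, 4*d,
// 8*d) to the displacement. A left shift by 1, 2, or 3 becomes an index
// scaled by 2, 4, or 8 (LEAL2/LEAL4/LEAL8). Operands based on OpSB, the
// static base pseudo-register, are excluded because SB can only serve as the
// base of an address, never as an index.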
8203 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8204 v_0 := v.Args[0]
8205 // match: (LEAL [c] {s} (ADDLconst [d] x))
8206 // cond: is32Bit(int64(c)+int64(d))
8207 // result: (LEAL [c+d] {s} x)
8208 for {
8209 c := auxIntToInt32(v.AuxInt)
8210 s := auxToSym(v.Aux)
8211 if v_0.Op != OpAMD64ADDLconst {
8212 break
8213 }
8214 d := auxIntToInt32(v_0.AuxInt)
8215 x := v_0.Args[0]
8216 if !(is32Bit(int64(c) + int64(d))) {
8217 break
8218 }
8219 v.reset(OpAMD64LEAL)
8220 v.AuxInt = int32ToAuxInt(c + d)
8221 v.Aux = symToAux(s)
8222 v.AddArg(x)
8223 return true
8224 }
8225 // match: (LEAL [c] {s} (ADDL x y))
8226 // cond: x.Op != OpSB && y.Op != OpSB
8227 // result: (LEAL1 [c] {s} x y)
8228 for {
8229 c := auxIntToInt32(v.AuxInt)
8230 s := auxToSym(v.Aux)
8231 if v_0.Op != OpAMD64ADDL {
8232 break
8233 }
8234 _ = v_0.Args[1]
8235 v_0_0 := v_0.Args[0]
8236 v_0_1 := v_0.Args[1]
8237 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8238 x := v_0_0
8239 y := v_0_1
8240 if !(x.Op != OpSB && y.Op != OpSB) {
8241 continue
8242 }
8243 v.reset(OpAMD64LEAL1)
8244 v.AuxInt = int32ToAuxInt(c)
8245 v.Aux = symToAux(s)
8246 v.AddArg2(x, y)
8247 return true
8248 }
8249 break
8250 }
8251 return false
8252 }
8253 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8254 v_1 := v.Args[1]
8255 v_0 := v.Args[0]
8256 // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
8257 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8258 // result: (LEAL1 [c+d] {s} x y)
8259 for {
8260 c := auxIntToInt32(v.AuxInt)
8261 s := auxToSym(v.Aux)
8262 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8263 if v_0.Op != OpAMD64ADDLconst {
8264 continue
8265 }
8266 d := auxIntToInt32(v_0.AuxInt)
8267 x := v_0.Args[0]
8268 y := v_1
8269 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8270 continue
8271 }
8272 v.reset(OpAMD64LEAL1)
8273 v.AuxInt = int32ToAuxInt(c + d)
8274 v.Aux = symToAux(s)
8275 v.AddArg2(x, y)
8276 return true
8277 }
8278 break
8279 }
8280 // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
8281 // result: (LEAL2 [c] {s} x y)
8282 for {
8283 c := auxIntToInt32(v.AuxInt)
8284 s := auxToSym(v.Aux)
8285 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8286 x := v_0
8287 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8288 continue
8289 }
8290 y := v_1.Args[0]
8291 v.reset(OpAMD64LEAL2)
8292 v.AuxInt = int32ToAuxInt(c)
8293 v.Aux = symToAux(s)
8294 v.AddArg2(x, y)
8295 return true
8296 }
8297 break
8298 }
8299 // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
8300 // result: (LEAL4 [c] {s} x y)
8301 for {
8302 c := auxIntToInt32(v.AuxInt)
8303 s := auxToSym(v.Aux)
8304 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8305 x := v_0
8306 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8307 continue
8308 }
8309 y := v_1.Args[0]
8310 v.reset(OpAMD64LEAL4)
8311 v.AuxInt = int32ToAuxInt(c)
8312 v.Aux = symToAux(s)
8313 v.AddArg2(x, y)
8314 return true
8315 }
8316 break
8317 }
8318 // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
8319 // result: (LEAL8 [c] {s} x y)
8320 for {
8321 c := auxIntToInt32(v.AuxInt)
8322 s := auxToSym(v.Aux)
8323 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8324 x := v_0
8325 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8326 continue
8327 }
8328 y := v_1.Args[0]
8329 v.reset(OpAMD64LEAL8)
8330 v.AuxInt = int32ToAuxInt(c)
8331 v.Aux = symToAux(s)
8332 v.AddArg2(x, y)
8333 return true
8334 }
8335 break
8336 }
8337 return false
8338 }
8339 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8340 v_1 := v.Args[1]
8341 v_0 := v.Args[0]
8342 // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
8343 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8344 // result: (LEAL2 [c+d] {s} x y)
8345 for {
8346 c := auxIntToInt32(v.AuxInt)
8347 s := auxToSym(v.Aux)
8348 if v_0.Op != OpAMD64ADDLconst {
8349 break
8350 }
8351 d := auxIntToInt32(v_0.AuxInt)
8352 x := v_0.Args[0]
8353 y := v_1
8354 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8355 break
8356 }
8357 v.reset(OpAMD64LEAL2)
8358 v.AuxInt = int32ToAuxInt(c + d)
8359 v.Aux = symToAux(s)
8360 v.AddArg2(x, y)
8361 return true
8362 }
8363
8364
8365
8366 for {
8367 c := auxIntToInt32(v.AuxInt)
8368 s := auxToSym(v.Aux)
8369 x := v_0
8370 if v_1.Op != OpAMD64ADDLconst {
8371 break
8372 }
8373 d := auxIntToInt32(v_1.AuxInt)
8374 y := v_1.Args[0]
8375 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8376 break
8377 }
8378 v.reset(OpAMD64LEAL2)
8379 v.AuxInt = int32ToAuxInt(c + 2*d)
8380 v.Aux = symToAux(s)
8381 v.AddArg2(x, y)
8382 return true
8383 }
8384
8385
8386 for {
8387 c := auxIntToInt32(v.AuxInt)
8388 s := auxToSym(v.Aux)
8389 x := v_0
8390 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8391 break
8392 }
8393 y := v_1.Args[0]
8394 v.reset(OpAMD64LEAL4)
8395 v.AuxInt = int32ToAuxInt(c)
8396 v.Aux = symToAux(s)
8397 v.AddArg2(x, y)
8398 return true
8399 }
8400
8401
8402 for {
8403 c := auxIntToInt32(v.AuxInt)
8404 s := auxToSym(v.Aux)
8405 x := v_0
8406 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8407 break
8408 }
8409 y := v_1.Args[0]
8410 v.reset(OpAMD64LEAL8)
8411 v.AuxInt = int32ToAuxInt(c)
8412 v.Aux = symToAux(s)
8413 v.AddArg2(x, y)
8414 return true
8415 }
8416 return false
8417 }
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
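
// The LEAL rules above are strength reductions on 32-bit address arithmetic:
// a constant add is folded into the LEA displacement (guarded by is32Bit so
// the combined offset still fits a signed 32-bit field), and a small left
// shift of the index is folded into the LEA scale factor. As a sketch of the
// effect, (LEAL1 [c] {s} x (SHLLconst [2] y)) becomes (LEAL4 [c] {s} x y),
// which assembles to a single LEAL c(x)(y*4) instead of a shift plus an add.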
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
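
// The LEAQ/LEAQ1/2/4/8 rules mirror the LEAL rules for 64-bit addresses and
// additionally merge symbol-relative LEAQs via canMergeSym/mergeSym. For
// example, (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) collapses into a
// single (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) when the summed offset
// still passes is32Bit. The x.Op != OpSB guards keep the pseudo-register SB
// (the static base) out of operand positions where the assembler cannot
// encode it.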
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1
	// result: (MOVLstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1
	// result: (MOVQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
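
// The three MOVBE store rules above cancel an explicit byte swap against a
// byte-swapping store: writing the output of a single-use BSWAPL, BSWAPQ, or
// ROLWconst[8] with a MOVBExstore is equivalent to writing the original value
// with a plain MOVxstore of the same width, so the swap instruction can be
// dropped entirely.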
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
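
// MOVBQSX/MOVBQZX narrow a wider single-use load into a byte-sized sign- or
// zero-extending load (rebuilt in the load's own block via NewValue0 and
// copyOf), and simplify extensions whose upper bits are already determined:
// for instance (MOVBQZX (ANDLconst [c] x)) becomes (ANDLconst [c & 0xff] x),
// since masking with 0xff already clears every bit above bit 7.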
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
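
// MOVBload also performs store-to-load forwarding: a byte load from the same
// pointer, offset, and symbol as the immediately preceding byte store is
// replaced by a zero-extension of the value that was stored, and a load from
// a read-only symbol (symIsRO) is constant-folded at compile time via read8.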
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
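
// The MOVBstore rules fuse a single-use SETcc flag materialization into the
// store itself (SETL + MOVBstore becomes SETLstore, and likewise for the
// other nine conditions), drop redundant byte extensions on the stored value,
// and fold constant operands into MOVBstoreconst, truncating the constant to
// its low 8 bits via int32(int8(c)).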
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
10633 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
10634 v_2 := v.Args[2]
10635 v_1 := v.Args[1]
10636 v_0 := v.Args[0]
10637
10638
10639 for {
10640 off := auxIntToInt32(v.AuxInt)
10641 sym := auxToSym(v.Aux)
10642 ptr := v_0
10643 if v_1.Op != OpAMD64MOVLQSX {
10644 break
10645 }
10646 x := v_1.Args[0]
10647 mem := v_2
10648 v.reset(OpAMD64MOVLstore)
10649 v.AuxInt = int32ToAuxInt(off)
10650 v.Aux = symToAux(sym)
10651 v.AddArg3(ptr, x, mem)
10652 return true
10653 }
10654
10655
10656 for {
10657 off := auxIntToInt32(v.AuxInt)
10658 sym := auxToSym(v.Aux)
10659 ptr := v_0
10660 if v_1.Op != OpAMD64MOVLQZX {
10661 break
10662 }
10663 x := v_1.Args[0]
10664 mem := v_2
10665 v.reset(OpAMD64MOVLstore)
10666 v.AuxInt = int32ToAuxInt(off)
10667 v.Aux = symToAux(sym)
10668 v.AddArg3(ptr, x, mem)
10669 return true
10670 }
10671
10672
10673
10674 for {
10675 off1 := auxIntToInt32(v.AuxInt)
10676 sym := auxToSym(v.Aux)
10677 if v_0.Op != OpAMD64ADDQconst {
10678 break
10679 }
10680 off2 := auxIntToInt32(v_0.AuxInt)
10681 ptr := v_0.Args[0]
10682 val := v_1
10683 mem := v_2
10684 if !(is32Bit(int64(off1) + int64(off2))) {
10685 break
10686 }
10687 v.reset(OpAMD64MOVLstore)
10688 v.AuxInt = int32ToAuxInt(off1 + off2)
10689 v.Aux = symToAux(sym)
10690 v.AddArg3(ptr, val, mem)
10691 return true
10692 }
10693
10694
10695 for {
10696 off := auxIntToInt32(v.AuxInt)
10697 sym := auxToSym(v.Aux)
10698 ptr := v_0
10699 if v_1.Op != OpAMD64MOVLconst {
10700 break
10701 }
10702 c := auxIntToInt32(v_1.AuxInt)
10703 mem := v_2
10704 v.reset(OpAMD64MOVLstoreconst)
10705 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10706 v.Aux = symToAux(sym)
10707 v.AddArg2(ptr, mem)
10708 return true
10709 }
10710
10711
10712 for {
10713 off := auxIntToInt32(v.AuxInt)
10714 sym := auxToSym(v.Aux)
10715 ptr := v_0
10716 if v_1.Op != OpAMD64MOVQconst {
10717 break
10718 }
10719 c := auxIntToInt64(v_1.AuxInt)
10720 mem := v_2
10721 v.reset(OpAMD64MOVLstoreconst)
10722 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10723 v.Aux = symToAux(sym)
10724 v.AddArg2(ptr, mem)
10725 return true
10726 }
10727
10728
10729
10730 for {
10731 off1 := auxIntToInt32(v.AuxInt)
10732 sym1 := auxToSym(v.Aux)
10733 if v_0.Op != OpAMD64LEAQ {
10734 break
10735 }
10736 off2 := auxIntToInt32(v_0.AuxInt)
10737 sym2 := auxToSym(v_0.Aux)
10738 base := v_0.Args[0]
10739 val := v_1
10740 mem := v_2
10741 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10742 break
10743 }
10744 v.reset(OpAMD64MOVLstore)
10745 v.AuxInt = int32ToAuxInt(off1 + off2)
10746 v.Aux = symToAux(mergeSym(sym1, sym2))
10747 v.AddArg3(base, val, mem)
10748 return true
10749 }
10750
10751
10752
10753 for {
10754 off := auxIntToInt32(v.AuxInt)
10755 sym := auxToSym(v.Aux)
10756 ptr := v_0
10757 y := v_1
10758 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10759 break
10760 }
10761 mem := y.Args[2]
10762 x := y.Args[0]
10763 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10764 break
10765 }
10766 v.reset(OpAMD64ADDLmodify)
10767 v.AuxInt = int32ToAuxInt(off)
10768 v.Aux = symToAux(sym)
10769 v.AddArg3(ptr, x, mem)
10770 return true
10771 }
10772
10773
10774
10775 for {
10776 off := auxIntToInt32(v.AuxInt)
10777 sym := auxToSym(v.Aux)
10778 ptr := v_0
10779 y := v_1
10780 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10781 break
10782 }
10783 mem := y.Args[2]
10784 x := y.Args[0]
10785 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10786 break
10787 }
10788 v.reset(OpAMD64ANDLmodify)
10789 v.AuxInt = int32ToAuxInt(off)
10790 v.Aux = symToAux(sym)
10791 v.AddArg3(ptr, x, mem)
10792 return true
10793 }
10794
10795
10796
10797 for {
10798 off := auxIntToInt32(v.AuxInt)
10799 sym := auxToSym(v.Aux)
10800 ptr := v_0
10801 y := v_1
10802 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10803 break
10804 }
10805 mem := y.Args[2]
10806 x := y.Args[0]
10807 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10808 break
10809 }
10810 v.reset(OpAMD64ORLmodify)
10811 v.AuxInt = int32ToAuxInt(off)
10812 v.Aux = symToAux(sym)
10813 v.AddArg3(ptr, x, mem)
10814 return true
10815 }
10816
10817
10818
10819 for {
10820 off := auxIntToInt32(v.AuxInt)
10821 sym := auxToSym(v.Aux)
10822 ptr := v_0
10823 y := v_1
10824 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10825 break
10826 }
10827 mem := y.Args[2]
10828 x := y.Args[0]
10829 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10830 break
10831 }
10832 v.reset(OpAMD64XORLmodify)
10833 v.AuxInt = int32ToAuxInt(off)
10834 v.Aux = symToAux(sym)
10835 v.AddArg3(ptr, x, mem)
10836 return true
10837 }
10838
10839
10840
10841 for {
10842 off := auxIntToInt32(v.AuxInt)
10843 sym := auxToSym(v.Aux)
10844 ptr := v_0
10845 y := v_1
10846 if y.Op != OpAMD64ADDL {
10847 break
10848 }
10849 _ = y.Args[1]
10850 y_0 := y.Args[0]
10851 y_1 := y.Args[1]
10852 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10853 l := y_0
10854 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10855 continue
10856 }
10857 mem := l.Args[1]
10858 if ptr != l.Args[0] {
10859 continue
10860 }
10861 x := y_1
10862 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10863 continue
10864 }
10865 v.reset(OpAMD64ADDLmodify)
10866 v.AuxInt = int32ToAuxInt(off)
10867 v.Aux = symToAux(sym)
10868 v.AddArg3(ptr, x, mem)
10869 return true
10870 }
10871 break
10872 }
10873
10874
10875
10876 for {
10877 off := auxIntToInt32(v.AuxInt)
10878 sym := auxToSym(v.Aux)
10879 ptr := v_0
10880 y := v_1
10881 if y.Op != OpAMD64SUBL {
10882 break
10883 }
10884 x := y.Args[1]
10885 l := y.Args[0]
10886 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10887 break
10888 }
10889 mem := l.Args[1]
10890 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10891 break
10892 }
10893 v.reset(OpAMD64SUBLmodify)
10894 v.AuxInt = int32ToAuxInt(off)
10895 v.Aux = symToAux(sym)
10896 v.AddArg3(ptr, x, mem)
10897 return true
10898 }
10899
10900
10901
10902 for {
10903 off := auxIntToInt32(v.AuxInt)
10904 sym := auxToSym(v.Aux)
10905 ptr := v_0
10906 y := v_1
10907 if y.Op != OpAMD64ANDL {
10908 break
10909 }
10910 _ = y.Args[1]
10911 y_0 := y.Args[0]
10912 y_1 := y.Args[1]
10913 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10914 l := y_0
10915 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10916 continue
10917 }
10918 mem := l.Args[1]
10919 if ptr != l.Args[0] {
10920 continue
10921 }
10922 x := y_1
10923 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10924 continue
10925 }
10926 v.reset(OpAMD64ANDLmodify)
10927 v.AuxInt = int32ToAuxInt(off)
10928 v.Aux = symToAux(sym)
10929 v.AddArg3(ptr, x, mem)
10930 return true
10931 }
10932 break
10933 }
10934
10935
10936
10937 for {
10938 off := auxIntToInt32(v.AuxInt)
10939 sym := auxToSym(v.Aux)
10940 ptr := v_0
10941 y := v_1
10942 if y.Op != OpAMD64ORL {
10943 break
10944 }
10945 _ = y.Args[1]
10946 y_0 := y.Args[0]
10947 y_1 := y.Args[1]
10948 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10949 l := y_0
10950 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10951 continue
10952 }
10953 mem := l.Args[1]
10954 if ptr != l.Args[0] {
10955 continue
10956 }
10957 x := y_1
10958 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10959 continue
10960 }
10961 v.reset(OpAMD64ORLmodify)
10962 v.AuxInt = int32ToAuxInt(off)
10963 v.Aux = symToAux(sym)
10964 v.AddArg3(ptr, x, mem)
10965 return true
10966 }
10967 break
10968 }
10969
10970
10971
10972 for {
10973 off := auxIntToInt32(v.AuxInt)
10974 sym := auxToSym(v.Aux)
10975 ptr := v_0
10976 y := v_1
10977 if y.Op != OpAMD64XORL {
10978 break
10979 }
10980 _ = y.Args[1]
10981 y_0 := y.Args[0]
10982 y_1 := y.Args[1]
10983 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10984 l := y_0
10985 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10986 continue
10987 }
10988 mem := l.Args[1]
10989 if ptr != l.Args[0] {
10990 continue
10991 }
10992 x := y_1
10993 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10994 continue
10995 }
10996 v.reset(OpAMD64XORLmodify)
10997 v.AuxInt = int32ToAuxInt(off)
10998 v.Aux = symToAux(sym)
10999 v.AddArg3(ptr, x, mem)
11000 return true
11001 }
11002 break
11003 }
11004
11005
11006
11007 for {
11008 off := auxIntToInt32(v.AuxInt)
11009 sym := auxToSym(v.Aux)
11010 ptr := v_0
11011 a := v_1
11012 if a.Op != OpAMD64ADDLconst {
11013 break
11014 }
11015 c := auxIntToInt32(a.AuxInt)
11016 l := a.Args[0]
11017 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11018 break
11019 }
11020 mem := l.Args[1]
11021 ptr2 := l.Args[0]
11022 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11023 break
11024 }
11025 v.reset(OpAMD64ADDLconstmodify)
11026 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11027 v.Aux = symToAux(sym)
11028 v.AddArg2(ptr, mem)
11029 return true
11030 }
11031 // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11032 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11033 // result: (ANDLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11034 for {
11035 off := auxIntToInt32(v.AuxInt)
11036 sym := auxToSym(v.Aux)
11037 ptr := v_0
11038 a := v_1
11039 if a.Op != OpAMD64ANDLconst {
11040 break
11041 }
11042 c := auxIntToInt32(a.AuxInt)
11043 l := a.Args[0]
11044 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11045 break
11046 }
11047 mem := l.Args[1]
11048 ptr2 := l.Args[0]
11049 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11050 break
11051 }
11052 v.reset(OpAMD64ANDLconstmodify)
11053 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11054 v.Aux = symToAux(sym)
11055 v.AddArg2(ptr, mem)
11056 return true
11057 }
11058 // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11059 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11060 // result: (ORLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11061 for {
11062 off := auxIntToInt32(v.AuxInt)
11063 sym := auxToSym(v.Aux)
11064 ptr := v_0
11065 a := v_1
11066 if a.Op != OpAMD64ORLconst {
11067 break
11068 }
11069 c := auxIntToInt32(a.AuxInt)
11070 l := a.Args[0]
11071 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11072 break
11073 }
11074 mem := l.Args[1]
11075 ptr2 := l.Args[0]
11076 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11077 break
11078 }
11079 v.reset(OpAMD64ORLconstmodify)
11080 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11081 v.Aux = symToAux(sym)
11082 v.AddArg2(ptr, mem)
11083 return true
11084 }
11085 // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11086 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11087 // result: (XORLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11088 for {
11089 off := auxIntToInt32(v.AuxInt)
11090 sym := auxToSym(v.Aux)
11091 ptr := v_0
11092 a := v_1
11093 if a.Op != OpAMD64XORLconst {
11094 break
11095 }
11096 c := auxIntToInt32(a.AuxInt)
11097 l := a.Args[0]
11098 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11099 break
11100 }
11101 mem := l.Args[1]
11102 ptr2 := l.Args[0]
11103 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11104 break
11105 }
11106 v.reset(OpAMD64XORLconstmodify)
11107 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11108 v.Aux = symToAux(sym)
11109 v.AddArg2(ptr, mem)
11110 return true
11111 }
11112 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
11113 // result: (MOVSSstore [off] {sym} ptr val mem)
11114 for {
11115 off := auxIntToInt32(v.AuxInt)
11116 sym := auxToSym(v.Aux)
11117 ptr := v_0
11118 if v_1.Op != OpAMD64MOVLf2i {
11119 break
11120 }
11121 val := v_1.Args[0]
11122 mem := v_2
11123 v.reset(OpAMD64MOVSSstore)
11124 v.AuxInt = int32ToAuxInt(off)
11125 v.Aux = symToAux(sym)
11126 v.AddArg3(ptr, val, mem)
11127 return true
11128 }
11129 // match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
11130 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
11131 // result: (MOVBELstore [i] {s} p w mem)
11132 for {
11133 i := auxIntToInt32(v.AuxInt)
11134 s := auxToSym(v.Aux)
11135 p := v_0
11136 x := v_1
11137 if x.Op != OpAMD64BSWAPL {
11138 break
11139 }
11140 w := x.Args[0]
11141 mem := v_2
11142 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
11143 break
11144 }
11145 v.reset(OpAMD64MOVBELstore)
11146 v.AuxInt = int32ToAuxInt(i)
11147 v.Aux = symToAux(s)
11148 v.AddArg3(p, w, mem)
11149 return true
11150 }
11151 return false
11152 }
11153 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
11154 v_1 := v.Args[1]
11155 v_0 := v.Args[0]
11156 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
11157 // cond: ValAndOff(sc).canAdd32(off)
11158 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
11159 for {
11160 sc := auxIntToValAndOff(v.AuxInt)
11161 s := auxToSym(v.Aux)
11162 if v_0.Op != OpAMD64ADDQconst {
11163 break
11164 }
11165 off := auxIntToInt32(v_0.AuxInt)
11166 ptr := v_0.Args[0]
11167 mem := v_1
11168 if !(ValAndOff(sc).canAdd32(off)) {
11169 break
11170 }
11171 v.reset(OpAMD64MOVLstoreconst)
11172 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11173 v.Aux = symToAux(s)
11174 v.AddArg2(ptr, mem)
11175 return true
11176 }
11177 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
11178 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
11179 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11180 for {
11181 sc := auxIntToValAndOff(v.AuxInt)
11182 sym1 := auxToSym(v.Aux)
11183 if v_0.Op != OpAMD64LEAQ {
11184 break
11185 }
11186 off := auxIntToInt32(v_0.AuxInt)
11187 sym2 := auxToSym(v_0.Aux)
11188 ptr := v_0.Args[0]
11189 mem := v_1
11190 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11191 break
11192 }
11193 v.reset(OpAMD64MOVLstoreconst)
11194 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11195 v.Aux = symToAux(mergeSym(sym1, sym2))
11196 v.AddArg2(ptr, mem)
11197 return true
11198 }
11199 return false
11200 }
11201 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
11202 v_1 := v.Args[1]
11203 v_0 := v.Args[0]
11204 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
11205 // cond: is32Bit(int64(off1)+int64(off2))
11206 // result: (MOVOload [off1+off2] {sym} ptr mem)
11207 for {
11208 off1 := auxIntToInt32(v.AuxInt)
11209 sym := auxToSym(v.Aux)
11210 if v_0.Op != OpAMD64ADDQconst {
11211 break
11212 }
11213 off2 := auxIntToInt32(v_0.AuxInt)
11214 ptr := v_0.Args[0]
11215 mem := v_1
11216 if !(is32Bit(int64(off1) + int64(off2))) {
11217 break
11218 }
11219 v.reset(OpAMD64MOVOload)
11220 v.AuxInt = int32ToAuxInt(off1 + off2)
11221 v.Aux = symToAux(sym)
11222 v.AddArg2(ptr, mem)
11223 return true
11224 }
11225 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11226 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11227 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11228 for {
11229 off1 := auxIntToInt32(v.AuxInt)
11230 sym1 := auxToSym(v.Aux)
11231 if v_0.Op != OpAMD64LEAQ {
11232 break
11233 }
11234 off2 := auxIntToInt32(v_0.AuxInt)
11235 sym2 := auxToSym(v_0.Aux)
11236 base := v_0.Args[0]
11237 mem := v_1
11238 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11239 break
11240 }
11241 v.reset(OpAMD64MOVOload)
11242 v.AuxInt = int32ToAuxInt(off1 + off2)
11243 v.Aux = symToAux(mergeSym(sym1, sym2))
11244 v.AddArg2(base, mem)
11245 return true
11246 }
11247 return false
11248 }
11249 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
11250 v_2 := v.Args[2]
11251 v_1 := v.Args[1]
11252 v_0 := v.Args[0]
11253 b := v.Block
11254 config := b.Func.Config
11255 typ := &b.Func.Config.Types
11256 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
11257 // cond: is32Bit(int64(off1)+int64(off2))
11258 // result: (MOVOstore [off1+off2] {sym} ptr val mem)
11259 for {
11260 off1 := auxIntToInt32(v.AuxInt)
11261 sym := auxToSym(v.Aux)
11262 if v_0.Op != OpAMD64ADDQconst {
11263 break
11264 }
11265 off2 := auxIntToInt32(v_0.AuxInt)
11266 ptr := v_0.Args[0]
11267 val := v_1
11268 mem := v_2
11269 if !(is32Bit(int64(off1) + int64(off2))) {
11270 break
11271 }
11272 v.reset(OpAMD64MOVOstore)
11273 v.AuxInt = int32ToAuxInt(off1 + off2)
11274 v.Aux = symToAux(sym)
11275 v.AddArg3(ptr, val, mem)
11276 return true
11277 }
11278 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
11279 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11280 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
11281 for {
11282 off1 := auxIntToInt32(v.AuxInt)
11283 sym1 := auxToSym(v.Aux)
11284 if v_0.Op != OpAMD64LEAQ {
11285 break
11286 }
11287 off2 := auxIntToInt32(v_0.AuxInt)
11288 sym2 := auxToSym(v_0.Aux)
11289 base := v_0.Args[0]
11290 val := v_1
11291 mem := v_2
11292 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11293 break
11294 }
11295 v.reset(OpAMD64MOVOstore)
11296 v.AuxInt = int32ToAuxInt(off1 + off2)
11297 v.Aux = symToAux(mergeSym(sym1, sym2))
11298 v.AddArg3(base, val, mem)
11299 return true
11300 }
11301 // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
11302 // cond: symIsRO(srcSym)
11303 // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
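// Note: this turns a 16-byte copy out of a read-only symbol into two 8-byte
// immediate stores, high half first; read64 extracts each half from the
// symbol's data using the target byte order. Sketch (illustrative only):
// copying srcSym[srcOff:srcOff+16] to ptr+dstOff becomes
//   (MOVQstore [dstOff+8] ptr $hi (MOVQstore [dstOff] ptr $lo mem))
// where $lo and $hi are the two constants read out of the symbol.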
11304 for {
11305 dstOff := auxIntToInt32(v.AuxInt)
11306 dstSym := auxToSym(v.Aux)
11307 ptr := v_0
11308 if v_1.Op != OpAMD64MOVOload {
11309 break
11310 }
11311 srcOff := auxIntToInt32(v_1.AuxInt)
11312 srcSym := auxToSym(v_1.Aux)
11313 v_1_0 := v_1.Args[0]
11314 if v_1_0.Op != OpSB {
11315 break
11316 }
11317 mem := v_2
11318 if !(symIsRO(srcSym)) {
11319 break
11320 }
11321 v.reset(OpAMD64MOVQstore)
11322 v.AuxInt = int32ToAuxInt(dstOff + 8)
11323 v.Aux = symToAux(dstSym)
11324 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11325 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
11326 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
11327 v1.AuxInt = int32ToAuxInt(dstOff)
11328 v1.Aux = symToAux(dstSym)
11329 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11330 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
11331 v1.AddArg3(ptr, v2, mem)
11332 v.AddArg3(ptr, v0, v1)
11333 return true
11334 }
11335 return false
11336 }
11337 func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
11338 v_1 := v.Args[1]
11339 v_0 := v.Args[0]
11340 // match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
11341 // cond: ValAndOff(sc).canAdd32(off)
11342 // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
11343 for {
11344 sc := auxIntToValAndOff(v.AuxInt)
11345 s := auxToSym(v.Aux)
11346 if v_0.Op != OpAMD64ADDQconst {
11347 break
11348 }
11349 off := auxIntToInt32(v_0.AuxInt)
11350 ptr := v_0.Args[0]
11351 mem := v_1
11352 if !(ValAndOff(sc).canAdd32(off)) {
11353 break
11354 }
11355 v.reset(OpAMD64MOVOstoreconst)
11356 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11357 v.Aux = symToAux(s)
11358 v.AddArg2(ptr, mem)
11359 return true
11360 }
11361 // match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
11362 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
11363 // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11364 for {
11365 sc := auxIntToValAndOff(v.AuxInt)
11366 sym1 := auxToSym(v.Aux)
11367 if v_0.Op != OpAMD64LEAQ {
11368 break
11369 }
11370 off := auxIntToInt32(v_0.AuxInt)
11371 sym2 := auxToSym(v_0.Aux)
11372 ptr := v_0.Args[0]
11373 mem := v_1
11374 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11375 break
11376 }
11377 v.reset(OpAMD64MOVOstoreconst)
11378 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11379 v.Aux = symToAux(mergeSym(sym1, sym2))
11380 v.AddArg2(ptr, mem)
11381 return true
11382 }
11383 return false
11384 }
11385 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
11386 v_1 := v.Args[1]
11387 v_0 := v.Args[0]
11388 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
11389 // cond: is32Bit(int64(off1)+int64(off2))
11390 // result: (MOVQatomicload [off1+off2] {sym} ptr mem)
11391 for {
11392 off1 := auxIntToInt32(v.AuxInt)
11393 sym := auxToSym(v.Aux)
11394 if v_0.Op != OpAMD64ADDQconst {
11395 break
11396 }
11397 off2 := auxIntToInt32(v_0.AuxInt)
11398 ptr := v_0.Args[0]
11399 mem := v_1
11400 if !(is32Bit(int64(off1) + int64(off2))) {
11401 break
11402 }
11403 v.reset(OpAMD64MOVQatomicload)
11404 v.AuxInt = int32ToAuxInt(off1 + off2)
11405 v.Aux = symToAux(sym)
11406 v.AddArg2(ptr, mem)
11407 return true
11408 }
11409 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
11410 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11411 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
11412 for {
11413 off1 := auxIntToInt32(v.AuxInt)
11414 sym1 := auxToSym(v.Aux)
11415 if v_0.Op != OpAMD64LEAQ {
11416 break
11417 }
11418 off2 := auxIntToInt32(v_0.AuxInt)
11419 sym2 := auxToSym(v_0.Aux)
11420 ptr := v_0.Args[0]
11421 mem := v_1
11422 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11423 break
11424 }
11425 v.reset(OpAMD64MOVQatomicload)
11426 v.AuxInt = int32ToAuxInt(off1 + off2)
11427 v.Aux = symToAux(mergeSym(sym1, sym2))
11428 v.AddArg2(ptr, mem)
11429 return true
11430 }
11431 return false
11432 }
11433 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
11434 v_0 := v.Args[0]
11435 b := v.Block
11436 // match: (MOVQf2i <t> (Arg <u> [off] {sym}))
11437 // cond: t.Size() == u.Size()
11438 // result: @b.Func.Entry (Arg <t> [off] {sym})
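// Note: MOVQf2i of a same-sized argument is just a reinterpretation of the
// bits, so instead of emitting a register move the rule re-materializes the
// Arg with the integer type in the function's entry block.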
11439 for {
11440 t := v.Type
11441 if v_0.Op != OpArg {
11442 break
11443 }
11444 u := v_0.Type
11445 off := auxIntToInt32(v_0.AuxInt)
11446 sym := auxToSym(v_0.Aux)
11447 if !(t.Size() == u.Size()) {
11448 break
11449 }
11450 b = b.Func.Entry
11451 v0 := b.NewValue0(v.Pos, OpArg, t)
11452 v.copyOf(v0)
11453 v0.AuxInt = int32ToAuxInt(off)
11454 v0.Aux = symToAux(sym)
11455 return true
11456 }
11457 return false
11458 }
11459 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
11460 v_0 := v.Args[0]
11461 b := v.Block
11462 // match: (MOVQi2f <t> (Arg <u> [off] {sym}))
11463 // cond: t.Size() == u.Size()
11464 // result: @b.Func.Entry (Arg <t> [off] {sym})
11465 for {
11466 t := v.Type
11467 if v_0.Op != OpArg {
11468 break
11469 }
11470 u := v_0.Type
11471 off := auxIntToInt32(v_0.AuxInt)
11472 sym := auxToSym(v_0.Aux)
11473 if !(t.Size() == u.Size()) {
11474 break
11475 }
11476 b = b.Func.Entry
11477 v0 := b.NewValue0(v.Pos, OpArg, t)
11478 v.copyOf(v0)
11479 v0.AuxInt = int32ToAuxInt(off)
11480 v0.Aux = symToAux(sym)
11481 return true
11482 }
11483 return false
11484 }
11485 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
11486 v_1 := v.Args[1]
11487 v_0 := v.Args[0]
11488 b := v.Block
11489 config := b.Func.Config
11490 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
11491 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
11492 // result: x
11493 for {
11494 off := auxIntToInt32(v.AuxInt)
11495 sym := auxToSym(v.Aux)
11496 ptr := v_0
11497 if v_1.Op != OpAMD64MOVQstore {
11498 break
11499 }
11500 off2 := auxIntToInt32(v_1.AuxInt)
11501 sym2 := auxToSym(v_1.Aux)
11502 x := v_1.Args[1]
11503 ptr2 := v_1.Args[0]
11504 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11505 break
11506 }
11507 v.copyOf(x)
11508 return true
11509 }
11510 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
11511 // cond: is32Bit(int64(off1)+int64(off2))
11512 // result: (MOVQload [off1+off2] {sym} ptr mem)
11513 for {
11514 off1 := auxIntToInt32(v.AuxInt)
11515 sym := auxToSym(v.Aux)
11516 if v_0.Op != OpAMD64ADDQconst {
11517 break
11518 }
11519 off2 := auxIntToInt32(v_0.AuxInt)
11520 ptr := v_0.Args[0]
11521 mem := v_1
11522 if !(is32Bit(int64(off1) + int64(off2))) {
11523 break
11524 }
11525 v.reset(OpAMD64MOVQload)
11526 v.AuxInt = int32ToAuxInt(off1 + off2)
11527 v.Aux = symToAux(sym)
11528 v.AddArg2(ptr, mem)
11529 return true
11530 }
11531 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11532 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11533 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11534 for {
11535 off1 := auxIntToInt32(v.AuxInt)
11536 sym1 := auxToSym(v.Aux)
11537 if v_0.Op != OpAMD64LEAQ {
11538 break
11539 }
11540 off2 := auxIntToInt32(v_0.AuxInt)
11541 sym2 := auxToSym(v_0.Aux)
11542 base := v_0.Args[0]
11543 mem := v_1
11544 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11545 break
11546 }
11547 v.reset(OpAMD64MOVQload)
11548 v.AuxInt = int32ToAuxInt(off1 + off2)
11549 v.Aux = symToAux(mergeSym(sym1, sym2))
11550 v.AddArg2(base, mem)
11551 return true
11552 }
11553 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
11554 // result: (MOVQf2i val)
11555 for {
11556 off := auxIntToInt32(v.AuxInt)
11557 sym := auxToSym(v.Aux)
11558 ptr := v_0
11559 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
11560 break
11561 }
11562 val := v_1.Args[1]
11563 if ptr != v_1.Args[0] {
11564 break
11565 }
11566 v.reset(OpAMD64MOVQf2i)
11567 v.AddArg(val)
11568 return true
11569 }
11570 // match: (MOVQload [off] {sym} (SB) _)
11571 // cond: symIsRO(sym)
11572 // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
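// Note: a load through SB from a read-only symbol has a value that is fixed
// at link time, so it folds to a MOVQconst; read64 decodes the symbol's
// bytes with the target's byte order.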
11573 for {
11574 off := auxIntToInt32(v.AuxInt)
11575 sym := auxToSym(v.Aux)
11576 if v_0.Op != OpSB || !(symIsRO(sym)) {
11577 break
11578 }
11579 v.reset(OpAMD64MOVQconst)
11580 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
11581 return true
11582 }
11583 return false
11584 }
11585 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
11586 v_2 := v.Args[2]
11587 v_1 := v.Args[1]
11588 v_0 := v.Args[0]
11589 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
11590 // cond: is32Bit(int64(off1)+int64(off2))
11591 // result: (MOVQstore [off1+off2] {sym} ptr val mem)
11592 for {
11593 off1 := auxIntToInt32(v.AuxInt)
11594 sym := auxToSym(v.Aux)
11595 if v_0.Op != OpAMD64ADDQconst {
11596 break
11597 }
11598 off2 := auxIntToInt32(v_0.AuxInt)
11599 ptr := v_0.Args[0]
11600 val := v_1
11601 mem := v_2
11602 if !(is32Bit(int64(off1) + int64(off2))) {
11603 break
11604 }
11605 v.reset(OpAMD64MOVQstore)
11606 v.AuxInt = int32ToAuxInt(off1 + off2)
11607 v.Aux = symToAux(sym)
11608 v.AddArg3(ptr, val, mem)
11609 return true
11610 }
11611 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
11612 // cond: validVal(c)
11613 // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
11614 for {
11615 off := auxIntToInt32(v.AuxInt)
11616 sym := auxToSym(v.Aux)
11617 ptr := v_0
11618 if v_1.Op != OpAMD64MOVQconst {
11619 break
11620 }
11621 c := auxIntToInt64(v_1.AuxInt)
11622 mem := v_2
11623 if !(validVal(c)) {
11624 break
11625 }
11626 v.reset(OpAMD64MOVQstoreconst)
11627 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11628 v.Aux = symToAux(sym)
11629 v.AddArg2(ptr, mem)
11630 return true
11631 }
11632 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
11633 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11634 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
11635 for {
11636 off1 := auxIntToInt32(v.AuxInt)
11637 sym1 := auxToSym(v.Aux)
11638 if v_0.Op != OpAMD64LEAQ {
11639 break
11640 }
11641 off2 := auxIntToInt32(v_0.AuxInt)
11642 sym2 := auxToSym(v_0.Aux)
11643 base := v_0.Args[0]
11644 val := v_1
11645 mem := v_2
11646 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11647 break
11648 }
11649 v.reset(OpAMD64MOVQstore)
11650 v.AuxInt = int32ToAuxInt(off1 + off2)
11651 v.Aux = symToAux(mergeSym(sym1, sym2))
11652 v.AddArg3(base, val, mem)
11653 return true
11654 }
11655 // match: (MOVQstore [off] {sym} ptr y:(ADDQload [off] {sym} x ptr mem) mem)
11656 // cond: y.Uses==1 && clobber(y)
11657 // result: (ADDQmodify [off] {sym} ptr x mem)
11658 for {
11659 off := auxIntToInt32(v.AuxInt)
11660 sym := auxToSym(v.Aux)
11661 ptr := v_0
11662 y := v_1
11663 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11664 break
11665 }
11666 mem := y.Args[2]
11667 x := y.Args[0]
11668 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11669 break
11670 }
11671 v.reset(OpAMD64ADDQmodify)
11672 v.AuxInt = int32ToAuxInt(off)
11673 v.Aux = symToAux(sym)
11674 v.AddArg3(ptr, x, mem)
11675 return true
11676 }
11677 // match: (MOVQstore [off] {sym} ptr y:(ANDQload [off] {sym} x ptr mem) mem)
11678 // cond: y.Uses==1 && clobber(y)
11679 // result: (ANDQmodify [off] {sym} ptr x mem)
11680 for {
11681 off := auxIntToInt32(v.AuxInt)
11682 sym := auxToSym(v.Aux)
11683 ptr := v_0
11684 y := v_1
11685 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11686 break
11687 }
11688 mem := y.Args[2]
11689 x := y.Args[0]
11690 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11691 break
11692 }
11693 v.reset(OpAMD64ANDQmodify)
11694 v.AuxInt = int32ToAuxInt(off)
11695 v.Aux = symToAux(sym)
11696 v.AddArg3(ptr, x, mem)
11697 return true
11698 }
11699 // match: (MOVQstore [off] {sym} ptr y:(ORQload [off] {sym} x ptr mem) mem)
11700 // cond: y.Uses==1 && clobber(y)
11701 // result: (ORQmodify [off] {sym} ptr x mem)
11702 for {
11703 off := auxIntToInt32(v.AuxInt)
11704 sym := auxToSym(v.Aux)
11705 ptr := v_0
11706 y := v_1
11707 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11708 break
11709 }
11710 mem := y.Args[2]
11711 x := y.Args[0]
11712 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11713 break
11714 }
11715 v.reset(OpAMD64ORQmodify)
11716 v.AuxInt = int32ToAuxInt(off)
11717 v.Aux = symToAux(sym)
11718 v.AddArg3(ptr, x, mem)
11719 return true
11720 }
11721 // match: (MOVQstore [off] {sym} ptr y:(XORQload [off] {sym} x ptr mem) mem)
11722 // cond: y.Uses==1 && clobber(y)
11723 // result: (XORQmodify [off] {sym} ptr x mem)
11724 for {
11725 off := auxIntToInt32(v.AuxInt)
11726 sym := auxToSym(v.Aux)
11727 ptr := v_0
11728 y := v_1
11729 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11730 break
11731 }
11732 mem := y.Args[2]
11733 x := y.Args[0]
11734 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11735 break
11736 }
11737 v.reset(OpAMD64XORQmodify)
11738 v.AuxInt = int32ToAuxInt(off)
11739 v.Aux = symToAux(sym)
11740 v.AddArg3(ptr, x, mem)
11741 return true
11742 }
11743 // match: (MOVQstore [off] {sym} ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11744 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11745 // result: (ADDQmodify [off] {sym} ptr x mem)
11746 for {
11747 off := auxIntToInt32(v.AuxInt)
11748 sym := auxToSym(v.Aux)
11749 ptr := v_0
11750 y := v_1
11751 if y.Op != OpAMD64ADDQ {
11752 break
11753 }
11754 _ = y.Args[1]
11755 y_0 := y.Args[0]
11756 y_1 := y.Args[1]
11757 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11758 l := y_0
11759 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11760 continue
11761 }
11762 mem := l.Args[1]
11763 if ptr != l.Args[0] {
11764 continue
11765 }
11766 x := y_1
11767 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11768 continue
11769 }
11770 v.reset(OpAMD64ADDQmodify)
11771 v.AuxInt = int32ToAuxInt(off)
11772 v.Aux = symToAux(sym)
11773 v.AddArg3(ptr, x, mem)
11774 return true
11775 }
11776 break
11777 }
11778 // match: (MOVQstore [off] {sym} ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11779 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11780 // result: (SUBQmodify [off] {sym} ptr x mem)
11781 for {
11782 off := auxIntToInt32(v.AuxInt)
11783 sym := auxToSym(v.Aux)
11784 ptr := v_0
11785 y := v_1
11786 if y.Op != OpAMD64SUBQ {
11787 break
11788 }
11789 x := y.Args[1]
11790 l := y.Args[0]
11791 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11792 break
11793 }
11794 mem := l.Args[1]
11795 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11796 break
11797 }
11798 v.reset(OpAMD64SUBQmodify)
11799 v.AuxInt = int32ToAuxInt(off)
11800 v.Aux = symToAux(sym)
11801 v.AddArg3(ptr, x, mem)
11802 return true
11803 }
11804 // match: (MOVQstore [off] {sym} ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11805 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11806 // result: (ANDQmodify [off] {sym} ptr x mem)
11807 for {
11808 off := auxIntToInt32(v.AuxInt)
11809 sym := auxToSym(v.Aux)
11810 ptr := v_0
11811 y := v_1
11812 if y.Op != OpAMD64ANDQ {
11813 break
11814 }
11815 _ = y.Args[1]
11816 y_0 := y.Args[0]
11817 y_1 := y.Args[1]
11818 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11819 l := y_0
11820 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11821 continue
11822 }
11823 mem := l.Args[1]
11824 if ptr != l.Args[0] {
11825 continue
11826 }
11827 x := y_1
11828 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11829 continue
11830 }
11831 v.reset(OpAMD64ANDQmodify)
11832 v.AuxInt = int32ToAuxInt(off)
11833 v.Aux = symToAux(sym)
11834 v.AddArg3(ptr, x, mem)
11835 return true
11836 }
11837 break
11838 }
11839 // match: (MOVQstore [off] {sym} ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11840 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11841 // result: (ORQmodify [off] {sym} ptr x mem)
11842 for {
11843 off := auxIntToInt32(v.AuxInt)
11844 sym := auxToSym(v.Aux)
11845 ptr := v_0
11846 y := v_1
11847 if y.Op != OpAMD64ORQ {
11848 break
11849 }
11850 _ = y.Args[1]
11851 y_0 := y.Args[0]
11852 y_1 := y.Args[1]
11853 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11854 l := y_0
11855 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11856 continue
11857 }
11858 mem := l.Args[1]
11859 if ptr != l.Args[0] {
11860 continue
11861 }
11862 x := y_1
11863 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11864 continue
11865 }
11866 v.reset(OpAMD64ORQmodify)
11867 v.AuxInt = int32ToAuxInt(off)
11868 v.Aux = symToAux(sym)
11869 v.AddArg3(ptr, x, mem)
11870 return true
11871 }
11872 break
11873 }
11874 // match: (MOVQstore [off] {sym} ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11875 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11876 // result: (XORQmodify [off] {sym} ptr x mem)
11877 for {
11878 off := auxIntToInt32(v.AuxInt)
11879 sym := auxToSym(v.Aux)
11880 ptr := v_0
11881 y := v_1
11882 if y.Op != OpAMD64XORQ {
11883 break
11884 }
11885 _ = y.Args[1]
11886 y_0 := y.Args[0]
11887 y_1 := y.Args[1]
11888 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11889 l := y_0
11890 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11891 continue
11892 }
11893 mem := l.Args[1]
11894 if ptr != l.Args[0] {
11895 continue
11896 }
11897 x := y_1
11898 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11899 continue
11900 }
11901 v.reset(OpAMD64XORQmodify)
11902 v.AuxInt = int32ToAuxInt(off)
11903 v.Aux = symToAux(sym)
11904 v.AddArg3(ptr, x, mem)
11905 return true
11906 }
11907 break
11908 }
11909 // match: (MOVQstore [off] {sym} ptr x:(BTSQconst [c] l:(MOVQload [off] {sym} ptr mem)) mem)
11910 // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
11911 // result: (BTSQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11912 for {
11913 off := auxIntToInt32(v.AuxInt)
11914 sym := auxToSym(v.Aux)
11915 ptr := v_0
11916 x := v_1
11917 if x.Op != OpAMD64BTSQconst {
11918 break
11919 }
11920 c := auxIntToInt8(x.AuxInt)
11921 l := x.Args[0]
11922 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11923 break
11924 }
11925 mem := l.Args[1]
11926 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11927 break
11928 }
11929 v.reset(OpAMD64BTSQconstmodify)
11930 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11931 v.Aux = symToAux(sym)
11932 v.AddArg2(ptr, mem)
11933 return true
11934 }
11935 // match: (MOVQstore [off] {sym} ptr x:(BTRQconst [c] l:(MOVQload [off] {sym} ptr mem)) mem)
11936 // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
11937 // result: (BTRQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11938 for {
11939 off := auxIntToInt32(v.AuxInt)
11940 sym := auxToSym(v.Aux)
11941 ptr := v_0
11942 x := v_1
11943 if x.Op != OpAMD64BTRQconst {
11944 break
11945 }
11946 c := auxIntToInt8(x.AuxInt)
11947 l := x.Args[0]
11948 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11949 break
11950 }
11951 mem := l.Args[1]
11952 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11953 break
11954 }
11955 v.reset(OpAMD64BTRQconstmodify)
11956 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11957 v.Aux = symToAux(sym)
11958 v.AddArg2(ptr, mem)
11959 return true
11960 }
11961 // match: (MOVQstore [off] {sym} ptr x:(BTCQconst [c] l:(MOVQload [off] {sym} ptr mem)) mem)
11962 // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
11963 // result: (BTCQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11964 for {
11965 off := auxIntToInt32(v.AuxInt)
11966 sym := auxToSym(v.Aux)
11967 ptr := v_0
11968 x := v_1
11969 if x.Op != OpAMD64BTCQconst {
11970 break
11971 }
11972 c := auxIntToInt8(x.AuxInt)
11973 l := x.Args[0]
11974 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11975 break
11976 }
11977 mem := l.Args[1]
11978 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11979 break
11980 }
11981 v.reset(OpAMD64BTCQconstmodify)
11982 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11983 v.Aux = symToAux(sym)
11984 v.AddArg2(ptr, mem)
11985 return true
11986 }
11987 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
11988 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11989 // result: (ADDQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
11990 for {
11991 off := auxIntToInt32(v.AuxInt)
11992 sym := auxToSym(v.Aux)
11993 ptr := v_0
11994 a := v_1
11995 if a.Op != OpAMD64ADDQconst {
11996 break
11997 }
11998 c := auxIntToInt32(a.AuxInt)
11999 l := a.Args[0]
12000 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12001 break
12002 }
12003 mem := l.Args[1]
12004 ptr2 := l.Args[0]
12005 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12006 break
12007 }
12008 v.reset(OpAMD64ADDQconstmodify)
12009 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12010 v.Aux = symToAux(sym)
12011 v.AddArg2(ptr, mem)
12012 return true
12013 }
12014 // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
12015 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12016 // result: (ANDQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
12017 for {
12018 off := auxIntToInt32(v.AuxInt)
12019 sym := auxToSym(v.Aux)
12020 ptr := v_0
12021 a := v_1
12022 if a.Op != OpAMD64ANDQconst {
12023 break
12024 }
12025 c := auxIntToInt32(a.AuxInt)
12026 l := a.Args[0]
12027 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12028 break
12029 }
12030 mem := l.Args[1]
12031 ptr2 := l.Args[0]
12032 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12033 break
12034 }
12035 v.reset(OpAMD64ANDQconstmodify)
12036 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12037 v.Aux = symToAux(sym)
12038 v.AddArg2(ptr, mem)
12039 return true
12040 }
12041 // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
12042 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12043 // result: (ORQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
12044 for {
12045 off := auxIntToInt32(v.AuxInt)
12046 sym := auxToSym(v.Aux)
12047 ptr := v_0
12048 a := v_1
12049 if a.Op != OpAMD64ORQconst {
12050 break
12051 }
12052 c := auxIntToInt32(a.AuxInt)
12053 l := a.Args[0]
12054 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12055 break
12056 }
12057 mem := l.Args[1]
12058 ptr2 := l.Args[0]
12059 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12060 break
12061 }
12062 v.reset(OpAMD64ORQconstmodify)
12063 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12064 v.Aux = symToAux(sym)
12065 v.AddArg2(ptr, mem)
12066 return true
12067 }
12068 // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
12069 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12070 // result: (XORQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
12071 for {
12072 off := auxIntToInt32(v.AuxInt)
12073 sym := auxToSym(v.Aux)
12074 ptr := v_0
12075 a := v_1
12076 if a.Op != OpAMD64XORQconst {
12077 break
12078 }
12079 c := auxIntToInt32(a.AuxInt)
12080 l := a.Args[0]
12081 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12082 break
12083 }
12084 mem := l.Args[1]
12085 ptr2 := l.Args[0]
12086 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12087 break
12088 }
12089 v.reset(OpAMD64XORQconstmodify)
12090 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12091 v.Aux = symToAux(sym)
12092 v.AddArg2(ptr, mem)
12093 return true
12094 }
12095 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
12096 // result: (MOVSDstore [off] {sym} ptr val mem)
12097 for {
12098 off := auxIntToInt32(v.AuxInt)
12099 sym := auxToSym(v.Aux)
12100 ptr := v_0
12101 if v_1.Op != OpAMD64MOVQf2i {
12102 break
12103 }
12104 val := v_1.Args[0]
12105 mem := v_2
12106 v.reset(OpAMD64MOVSDstore)
12107 v.AuxInt = int32ToAuxInt(off)
12108 v.Aux = symToAux(sym)
12109 v.AddArg3(ptr, val, mem)
12110 return true
12111 }
12112 // match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
12113 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
12114 // result: (MOVBEQstore [i] {s} p w mem)
12115 for {
12116 i := auxIntToInt32(v.AuxInt)
12117 s := auxToSym(v.Aux)
12118 p := v_0
12119 x := v_1
12120 if x.Op != OpAMD64BSWAPQ {
12121 break
12122 }
12123 w := x.Args[0]
12124 mem := v_2
12125 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12126 break
12127 }
12128 v.reset(OpAMD64MOVBEQstore)
12129 v.AuxInt = int32ToAuxInt(i)
12130 v.Aux = symToAux(s)
12131 v.AddArg3(p, w, mem)
12132 return true
12133 }
12134 return false
12135 }
12136 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
12137 v_1 := v.Args[1]
12138 v_0 := v.Args[0]
12139 b := v.Block
12140 config := b.Func.Config
12141 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
12142 // cond: ValAndOff(sc).canAdd32(off)
12143 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
12144 for {
12145 sc := auxIntToValAndOff(v.AuxInt)
12146 s := auxToSym(v.Aux)
12147 if v_0.Op != OpAMD64ADDQconst {
12148 break
12149 }
12150 off := auxIntToInt32(v_0.AuxInt)
12151 ptr := v_0.Args[0]
12152 mem := v_1
12153 if !(ValAndOff(sc).canAdd32(off)) {
12154 break
12155 }
12156 v.reset(OpAMD64MOVQstoreconst)
12157 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12158 v.Aux = symToAux(s)
12159 v.AddArg2(ptr, mem)
12160 return true
12161 }
12162 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
12163 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
12164 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
12165 for {
12166 sc := auxIntToValAndOff(v.AuxInt)
12167 sym1 := auxToSym(v.Aux)
12168 if v_0.Op != OpAMD64LEAQ {
12169 break
12170 }
12171 off := auxIntToInt32(v_0.AuxInt)
12172 sym2 := auxToSym(v_0.Aux)
12173 ptr := v_0.Args[0]
12174 mem := v_1
12175 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
12176 break
12177 }
12178 v.reset(OpAMD64MOVQstoreconst)
12179 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12180 v.Aux = symToAux(mergeSym(sym1, sym2))
12181 v.AddArg2(ptr, mem)
12182 return true
12183 }
12184 // match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
12185 // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
12186 // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
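// Note: two adjacent 8-byte zeroing stores fuse into one 16-byte
// MOVOstoreconst when SSE is usable; sequentialAddresses verifies that the
// two pointers, adjusted by their ValAndOff offsets, are exactly 8 bytes
// apart, and both stored values must be zero.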
12187 for {
12188 c := auxIntToValAndOff(v.AuxInt)
12189 s := auxToSym(v.Aux)
12190 p1 := v_0
12191 x := v_1
12192 if x.Op != OpAMD64MOVQstoreconst {
12193 break
12194 }
12195 a := auxIntToValAndOff(x.AuxInt)
12196 if auxToSym(x.Aux) != s {
12197 break
12198 }
12199 mem := x.Args[1]
12200 p0 := x.Args[0]
12201 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12202 break
12203 }
12204 v.reset(OpAMD64MOVOstoreconst)
12205 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12206 v.Aux = symToAux(s)
12207 v.AddArg2(p0, mem)
12208 return true
12209 }
12210 // match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
12211 // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
12212 // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
12213 for {
12214 a := auxIntToValAndOff(v.AuxInt)
12215 s := auxToSym(v.Aux)
12216 p0 := v_0
12217 x := v_1
12218 if x.Op != OpAMD64MOVQstoreconst {
12219 break
12220 }
12221 c := auxIntToValAndOff(x.AuxInt)
12222 if auxToSym(x.Aux) != s {
12223 break
12224 }
12225 mem := x.Args[1]
12226 p1 := x.Args[0]
12227 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12228 break
12229 }
12230 v.reset(OpAMD64MOVOstoreconst)
12231 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12232 v.Aux = symToAux(s)
12233 v.AddArg2(p0, mem)
12234 return true
12235 }
12236 return false
12237 }
12238 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
12239 v_1 := v.Args[1]
12240 v_0 := v.Args[0]
12241 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
12242 // cond: is32Bit(int64(off1)+int64(off2))
12243 // result: (MOVSDload [off1+off2] {sym} ptr mem)
12244 for {
12245 off1 := auxIntToInt32(v.AuxInt)
12246 sym := auxToSym(v.Aux)
12247 if v_0.Op != OpAMD64ADDQconst {
12248 break
12249 }
12250 off2 := auxIntToInt32(v_0.AuxInt)
12251 ptr := v_0.Args[0]
12252 mem := v_1
12253 if !(is32Bit(int64(off1) + int64(off2))) {
12254 break
12255 }
12256 v.reset(OpAMD64MOVSDload)
12257 v.AuxInt = int32ToAuxInt(off1 + off2)
12258 v.Aux = symToAux(sym)
12259 v.AddArg2(ptr, mem)
12260 return true
12261 }
12262 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12263 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12264 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12265 for {
12266 off1 := auxIntToInt32(v.AuxInt)
12267 sym1 := auxToSym(v.Aux)
12268 if v_0.Op != OpAMD64LEAQ {
12269 break
12270 }
12271 off2 := auxIntToInt32(v_0.AuxInt)
12272 sym2 := auxToSym(v_0.Aux)
12273 base := v_0.Args[0]
12274 mem := v_1
12275 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12276 break
12277 }
12278 v.reset(OpAMD64MOVSDload)
12279 v.AuxInt = int32ToAuxInt(off1 + off2)
12280 v.Aux = symToAux(mergeSym(sym1, sym2))
12281 v.AddArg2(base, mem)
12282 return true
12283 }
12284 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
12285 // result: (MOVQi2f val)
12286 for {
12287 off := auxIntToInt32(v.AuxInt)
12288 sym := auxToSym(v.Aux)
12289 ptr := v_0
12290 if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12291 break
12292 }
12293 val := v_1.Args[1]
12294 if ptr != v_1.Args[0] {
12295 break
12296 }
12297 v.reset(OpAMD64MOVQi2f)
12298 v.AddArg(val)
12299 return true
12300 }
12301 return false
12302 }
12303 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
12304 v_2 := v.Args[2]
12305 v_1 := v.Args[1]
12306 v_0 := v.Args[0]
12307 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12308 // cond: is32Bit(int64(off1)+int64(off2))
12309 // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
12310 for {
12311 off1 := auxIntToInt32(v.AuxInt)
12312 sym := auxToSym(v.Aux)
12313 if v_0.Op != OpAMD64ADDQconst {
12314 break
12315 }
12316 off2 := auxIntToInt32(v_0.AuxInt)
12317 ptr := v_0.Args[0]
12318 val := v_1
12319 mem := v_2
12320 if !(is32Bit(int64(off1) + int64(off2))) {
12321 break
12322 }
12323 v.reset(OpAMD64MOVSDstore)
12324 v.AuxInt = int32ToAuxInt(off1 + off2)
12325 v.Aux = symToAux(sym)
12326 v.AddArg3(ptr, val, mem)
12327 return true
12328 }
12329 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12330 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12331 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12332 for {
12333 off1 := auxIntToInt32(v.AuxInt)
12334 sym1 := auxToSym(v.Aux)
12335 if v_0.Op != OpAMD64LEAQ {
12336 break
12337 }
12338 off2 := auxIntToInt32(v_0.AuxInt)
12339 sym2 := auxToSym(v_0.Aux)
12340 base := v_0.Args[0]
12341 val := v_1
12342 mem := v_2
12343 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12344 break
12345 }
12346 v.reset(OpAMD64MOVSDstore)
12347 v.AuxInt = int32ToAuxInt(off1 + off2)
12348 v.Aux = symToAux(mergeSym(sym1, sym2))
12349 v.AddArg3(base, val, mem)
12350 return true
12351 }
12352 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
12353 // result: (MOVQstore [off] {sym} ptr val mem)
12354 for {
12355 off := auxIntToInt32(v.AuxInt)
12356 sym := auxToSym(v.Aux)
12357 ptr := v_0
12358 if v_1.Op != OpAMD64MOVQi2f {
12359 break
12360 }
12361 val := v_1.Args[0]
12362 mem := v_2
12363 v.reset(OpAMD64MOVQstore)
12364 v.AuxInt = int32ToAuxInt(off)
12365 v.Aux = symToAux(sym)
12366 v.AddArg3(ptr, val, mem)
12367 return true
12368 }
12369 return false
12370 }
12371 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
12372 v_1 := v.Args[1]
12373 v_0 := v.Args[0]
12374 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
12375 // cond: is32Bit(int64(off1)+int64(off2))
12376 // result: (MOVSSload [off1+off2] {sym} ptr mem)
12377 for {
12378 off1 := auxIntToInt32(v.AuxInt)
12379 sym := auxToSym(v.Aux)
12380 if v_0.Op != OpAMD64ADDQconst {
12381 break
12382 }
12383 off2 := auxIntToInt32(v_0.AuxInt)
12384 ptr := v_0.Args[0]
12385 mem := v_1
12386 if !(is32Bit(int64(off1) + int64(off2))) {
12387 break
12388 }
12389 v.reset(OpAMD64MOVSSload)
12390 v.AuxInt = int32ToAuxInt(off1 + off2)
12391 v.Aux = symToAux(sym)
12392 v.AddArg2(ptr, mem)
12393 return true
12394 }
12395 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12396 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12397 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12398 for {
12399 off1 := auxIntToInt32(v.AuxInt)
12400 sym1 := auxToSym(v.Aux)
12401 if v_0.Op != OpAMD64LEAQ {
12402 break
12403 }
12404 off2 := auxIntToInt32(v_0.AuxInt)
12405 sym2 := auxToSym(v_0.Aux)
12406 base := v_0.Args[0]
12407 mem := v_1
12408 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12409 break
12410 }
12411 v.reset(OpAMD64MOVSSload)
12412 v.AuxInt = int32ToAuxInt(off1 + off2)
12413 v.Aux = symToAux(mergeSym(sym1, sym2))
12414 v.AddArg2(base, mem)
12415 return true
12416 }
12417 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
12418 // result: (MOVLi2f val)
12419 for {
12420 off := auxIntToInt32(v.AuxInt)
12421 sym := auxToSym(v.Aux)
12422 ptr := v_0
12423 if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12424 break
12425 }
12426 val := v_1.Args[1]
12427 if ptr != v_1.Args[0] {
12428 break
12429 }
12430 v.reset(OpAMD64MOVLi2f)
12431 v.AddArg(val)
12432 return true
12433 }
12434 return false
12435 }
12436 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
12437 v_2 := v.Args[2]
12438 v_1 := v.Args[1]
12439 v_0 := v.Args[0]
12440 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12441 // cond: is32Bit(int64(off1)+int64(off2))
12442 // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
12443 for {
12444 off1 := auxIntToInt32(v.AuxInt)
12445 sym := auxToSym(v.Aux)
12446 if v_0.Op != OpAMD64ADDQconst {
12447 break
12448 }
12449 off2 := auxIntToInt32(v_0.AuxInt)
12450 ptr := v_0.Args[0]
12451 val := v_1
12452 mem := v_2
12453 if !(is32Bit(int64(off1) + int64(off2))) {
12454 break
12455 }
12456 v.reset(OpAMD64MOVSSstore)
12457 v.AuxInt = int32ToAuxInt(off1 + off2)
12458 v.Aux = symToAux(sym)
12459 v.AddArg3(ptr, val, mem)
12460 return true
12461 }
12462 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12463 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12464 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12465 for {
12466 off1 := auxIntToInt32(v.AuxInt)
12467 sym1 := auxToSym(v.Aux)
12468 if v_0.Op != OpAMD64LEAQ {
12469 break
12470 }
12471 off2 := auxIntToInt32(v_0.AuxInt)
12472 sym2 := auxToSym(v_0.Aux)
12473 base := v_0.Args[0]
12474 val := v_1
12475 mem := v_2
12476 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12477 break
12478 }
12479 v.reset(OpAMD64MOVSSstore)
12480 v.AuxInt = int32ToAuxInt(off1 + off2)
12481 v.Aux = symToAux(mergeSym(sym1, sym2))
12482 v.AddArg3(base, val, mem)
12483 return true
12484 }
12485 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
12486 // result: (MOVLstore [off] {sym} ptr val mem)
12487 for {
12488 off := auxIntToInt32(v.AuxInt)
12489 sym := auxToSym(v.Aux)
12490 ptr := v_0
12491 if v_1.Op != OpAMD64MOVLi2f {
12492 break
12493 }
12494 val := v_1.Args[0]
12495 mem := v_2
12496 v.reset(OpAMD64MOVLstore)
12497 v.AuxInt = int32ToAuxInt(off)
12498 v.Aux = symToAux(sym)
12499 v.AddArg3(ptr, val, mem)
12500 return true
12501 }
12502 return false
12503 }
12504 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
12505 v_0 := v.Args[0]
12506 b := v.Block
12507 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
12508 // cond: x.Uses == 1 && clobber(x)
12509 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
12510 for {
12511 x := v_0
12512 if x.Op != OpAMD64MOVWload {
12513 break
12514 }
12515 off := auxIntToInt32(x.AuxInt)
12516 sym := auxToSym(x.Aux)
12517 mem := x.Args[1]
12518 ptr := x.Args[0]
12519 if !(x.Uses == 1 && clobber(x)) {
12520 break
12521 }
12522 b = x.Block
12523 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12524 v.copyOf(v0)
12525 v0.AuxInt = int32ToAuxInt(off)
12526 v0.Aux = symToAux(sym)
12527 v0.AddArg2(ptr, mem)
12528 return true
12529 }
12530 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
12531 // cond: x.Uses == 1 && clobber(x)
12532 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
12533 for {
12534 x := v_0
12535 if x.Op != OpAMD64MOVLload {
12536 break
12537 }
12538 off := auxIntToInt32(x.AuxInt)
12539 sym := auxToSym(x.Aux)
12540 mem := x.Args[1]
12541 ptr := x.Args[0]
12542 if !(x.Uses == 1 && clobber(x)) {
12543 break
12544 }
12545 b = x.Block
12546 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12547 v.copyOf(v0)
12548 v0.AuxInt = int32ToAuxInt(off)
12549 v0.Aux = symToAux(sym)
12550 v0.AddArg2(ptr, mem)
12551 return true
12552 }
12553 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
12554 // cond: x.Uses == 1 && clobber(x)
12555 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
12556 for {
12557 x := v_0
12558 if x.Op != OpAMD64MOVQload {
12559 break
12560 }
12561 off := auxIntToInt32(x.AuxInt)
12562 sym := auxToSym(x.Aux)
12563 mem := x.Args[1]
12564 ptr := x.Args[0]
12565 if !(x.Uses == 1 && clobber(x)) {
12566 break
12567 }
12568 b = x.Block
12569 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12570 v.copyOf(v0)
12571 v0.AuxInt = int32ToAuxInt(off)
12572 v0.Aux = symToAux(sym)
12573 v0.AddArg2(ptr, mem)
12574 return true
12575 }
12576 // match: (MOVWQSX (ANDLconst [c] x))
12577 // cond: c & 0x8000 == 0
12578 // result: (ANDLconst [c & 0x7fff] x)
12579 for {
12580 if v_0.Op != OpAMD64ANDLconst {
12581 break
12582 }
12583 c := auxIntToInt32(v_0.AuxInt)
12584 x := v_0.Args[0]
12585 if !(c&0x8000 == 0) {
12586 break
12587 }
12588 v.reset(OpAMD64ANDLconst)
12589 v.AuxInt = int32ToAuxInt(c & 0x7fff)
12590 v.AddArg(x)
12591 return true
12592 }
12593 // match: (MOVWQSX (MOVWQSX x))
12594 // result: (MOVWQSX x)
12595 for {
12596 if v_0.Op != OpAMD64MOVWQSX {
12597 break
12598 }
12599 x := v_0.Args[0]
12600 v.reset(OpAMD64MOVWQSX)
12601 v.AddArg(x)
12602 return true
12603 }
12604 // match: (MOVWQSX (MOVBQSX x))
12605 // result: (MOVBQSX x)
12606 for {
12607 if v_0.Op != OpAMD64MOVBQSX {
12608 break
12609 }
12610 x := v_0.Args[0]
12611 v.reset(OpAMD64MOVBQSX)
12612 v.AddArg(x)
12613 return true
12614 }
12615 return false
12616 }
12617 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
12618 v_1 := v.Args[1]
12619 v_0 := v.Args[0]
12620 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
12621 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
12622 // result: (MOVWQSX x)
12623 for {
12624 off := auxIntToInt32(v.AuxInt)
12625 sym := auxToSym(v.Aux)
12626 ptr := v_0
12627 if v_1.Op != OpAMD64MOVWstore {
12628 break
12629 }
12630 off2 := auxIntToInt32(v_1.AuxInt)
12631 sym2 := auxToSym(v_1.Aux)
12632 x := v_1.Args[1]
12633 ptr2 := v_1.Args[0]
12634 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
12635 break
12636 }
12637 v.reset(OpAMD64MOVWQSX)
12638 v.AddArg(x)
12639 return true
12640 }
12641 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12642 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12643 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12644 for {
12645 off1 := auxIntToInt32(v.AuxInt)
12646 sym1 := auxToSym(v.Aux)
12647 if v_0.Op != OpAMD64LEAQ {
12648 break
12649 }
12650 off2 := auxIntToInt32(v_0.AuxInt)
12651 sym2 := auxToSym(v_0.Aux)
12652 base := v_0.Args[0]
12653 mem := v_1
12654 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12655 break
12656 }
12657 v.reset(OpAMD64MOVWQSXload)
12658 v.AuxInt = int32ToAuxInt(off1 + off2)
12659 v.Aux = symToAux(mergeSym(sym1, sym2))
12660 v.AddArg2(base, mem)
12661 return true
12662 }
12663 return false
12664 }
12665 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
12666 v_0 := v.Args[0]
12667 b := v.Block
12668 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
12669 // cond: x.Uses == 1 && clobber(x)
12670 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
12671 for {
12672 x := v_0
12673 if x.Op != OpAMD64MOVWload {
12674 break
12675 }
12676 off := auxIntToInt32(x.AuxInt)
12677 sym := auxToSym(x.Aux)
12678 mem := x.Args[1]
12679 ptr := x.Args[0]
12680 if !(x.Uses == 1 && clobber(x)) {
12681 break
12682 }
12683 b = x.Block
12684 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
12685 v.copyOf(v0)
12686 v0.AuxInt = int32ToAuxInt(off)
12687 v0.Aux = symToAux(sym)
12688 v0.AddArg2(ptr, mem)
12689 return true
12690 }
12691 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
12692 // cond: x.Uses == 1 && clobber(x)
12693 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
12694 for {
12695 x := v_0
12696 if x.Op != OpAMD64MOVLload {
12697 break
12698 }
12699 off := auxIntToInt32(x.AuxInt)
12700 sym := auxToSym(x.Aux)
12701 mem := x.Args[1]
12702 ptr := x.Args[0]
12703 if !(x.Uses == 1 && clobber(x)) {
12704 break
12705 }
12706 b = x.Block
12707 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
12708 v.copyOf(v0)
12709 v0.AuxInt = int32ToAuxInt(off)
12710 v0.Aux = symToAux(sym)
12711 v0.AddArg2(ptr, mem)
12712 return true
12713 }
12714 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
12715 // cond: x.Uses == 1 && clobber(x)
12716 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
12717 for {
12718 x := v_0
12719 if x.Op != OpAMD64MOVQload {
12720 break
12721 }
12722 off := auxIntToInt32(x.AuxInt)
12723 sym := auxToSym(x.Aux)
12724 mem := x.Args[1]
12725 ptr := x.Args[0]
12726 if !(x.Uses == 1 && clobber(x)) {
12727 break
12728 }
12729 b = x.Block
12730 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
12731 v.copyOf(v0)
12732 v0.AuxInt = int32ToAuxInt(off)
12733 v0.Aux = symToAux(sym)
12734 v0.AddArg2(ptr, mem)
12735 return true
12736 }
12737 // match: (MOVWQZX (ANDLconst [c] x))
12738 // result: (ANDLconst [c & 0xffff] x)
12739 for {
12740 if v_0.Op != OpAMD64ANDLconst {
12741 break
12742 }
12743 c := auxIntToInt32(v_0.AuxInt)
12744 x := v_0.Args[0]
12745 v.reset(OpAMD64ANDLconst)
12746 v.AuxInt = int32ToAuxInt(c & 0xffff)
12747 v.AddArg(x)
12748 return true
12749 }
12750 // match: (MOVWQZX (MOVWQZX x))
12751 // result: (MOVWQZX x)
12752 for {
12753 if v_0.Op != OpAMD64MOVWQZX {
12754 break
12755 }
12756 x := v_0.Args[0]
12757 v.reset(OpAMD64MOVWQZX)
12758 v.AddArg(x)
12759 return true
12760 }
12761 // match: (MOVWQZX (MOVBQZX x))
12762 // result: (MOVBQZX x)
12763 for {
12764 if v_0.Op != OpAMD64MOVBQZX {
12765 break
12766 }
12767 x := v_0.Args[0]
12768 v.reset(OpAMD64MOVBQZX)
12769 v.AddArg(x)
12770 return true
12771 }
12772 return false
12773 }
12774 func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
12775 v_1 := v.Args[1]
12776 v_0 := v.Args[0]
12777 b := v.Block
12778 config := b.Func.Config
12779 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
12780 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
12781 // result: (MOVWQZX x)
12782 for {
12783 off := auxIntToInt32(v.AuxInt)
12784 sym := auxToSym(v.Aux)
12785 ptr := v_0
12786 if v_1.Op != OpAMD64MOVWstore {
12787 break
12788 }
12789 off2 := auxIntToInt32(v_1.AuxInt)
12790 sym2 := auxToSym(v_1.Aux)
12791 x := v_1.Args[1]
12792 ptr2 := v_1.Args[0]
12793 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
12794 break
12795 }
12796 v.reset(OpAMD64MOVWQZX)
12797 v.AddArg(x)
12798 return true
12799 }
12800 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
12801 // cond: is32Bit(int64(off1)+int64(off2))
12802 // result: (MOVWload [off1+off2] {sym} ptr mem)
12803 for {
12804 off1 := auxIntToInt32(v.AuxInt)
12805 sym := auxToSym(v.Aux)
12806 if v_0.Op != OpAMD64ADDQconst {
12807 break
12808 }
12809 off2 := auxIntToInt32(v_0.AuxInt)
12810 ptr := v_0.Args[0]
12811 mem := v_1
12812 if !(is32Bit(int64(off1) + int64(off2))) {
12813 break
12814 }
12815 v.reset(OpAMD64MOVWload)
12816 v.AuxInt = int32ToAuxInt(off1 + off2)
12817 v.Aux = symToAux(sym)
12818 v.AddArg2(ptr, mem)
12819 return true
12820 }
12821 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12822 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12823 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12824 for {
12825 off1 := auxIntToInt32(v.AuxInt)
12826 sym1 := auxToSym(v.Aux)
12827 if v_0.Op != OpAMD64LEAQ {
12828 break
12829 }
12830 off2 := auxIntToInt32(v_0.AuxInt)
12831 sym2 := auxToSym(v_0.Aux)
12832 base := v_0.Args[0]
12833 mem := v_1
12834 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12835 break
12836 }
12837 v.reset(OpAMD64MOVWload)
12838 v.AuxInt = int32ToAuxInt(off1 + off2)
12839 v.Aux = symToAux(mergeSym(sym1, sym2))
12840 v.AddArg2(base, mem)
12841 return true
12842 }
12843 // match: (MOVWload [off] {sym} (SB) _)
12844 // cond: symIsRO(sym)
12845 // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
12846 for {
12847 off := auxIntToInt32(v.AuxInt)
12848 sym := auxToSym(v.Aux)
12849 if v_0.Op != OpSB || !(symIsRO(sym)) {
12850 break
12851 }
12852 v.reset(OpAMD64MOVLconst)
12853 v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
12854 return true
12855 }
12856 return false
12857 }
12858 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
12859 v_2 := v.Args[2]
12860 v_1 := v.Args[1]
12861 v_0 := v.Args[0]
12862 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
12863 // result: (MOVWstore [off] {sym} ptr x mem)
12864 for {
12865 off := auxIntToInt32(v.AuxInt)
12866 sym := auxToSym(v.Aux)
12867 ptr := v_0
12868 if v_1.Op != OpAMD64MOVWQSX {
12869 break
12870 }
12871 x := v_1.Args[0]
12872 mem := v_2
12873 v.reset(OpAMD64MOVWstore)
12874 v.AuxInt = int32ToAuxInt(off)
12875 v.Aux = symToAux(sym)
12876 v.AddArg3(ptr, x, mem)
12877 return true
12878 }
12879 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
12880 // result: (MOVWstore [off] {sym} ptr x mem)
12881 for {
12882 off := auxIntToInt32(v.AuxInt)
12883 sym := auxToSym(v.Aux)
12884 ptr := v_0
12885 if v_1.Op != OpAMD64MOVWQZX {
12886 break
12887 }
12888 x := v_1.Args[0]
12889 mem := v_2
12890 v.reset(OpAMD64MOVWstore)
12891 v.AuxInt = int32ToAuxInt(off)
12892 v.Aux = symToAux(sym)
12893 v.AddArg3(ptr, x, mem)
12894 return true
12895 }
12896 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12897 // cond: is32Bit(int64(off1)+int64(off2))
12898 // result: (MOVWstore [off1+off2] {sym} ptr val mem)
12899 for {
12900 off1 := auxIntToInt32(v.AuxInt)
12901 sym := auxToSym(v.Aux)
12902 if v_0.Op != OpAMD64ADDQconst {
12903 break
12904 }
12905 off2 := auxIntToInt32(v_0.AuxInt)
12906 ptr := v_0.Args[0]
12907 val := v_1
12908 mem := v_2
12909 if !(is32Bit(int64(off1) + int64(off2))) {
12910 break
12911 }
12912 v.reset(OpAMD64MOVWstore)
12913 v.AuxInt = int32ToAuxInt(off1 + off2)
12914 v.Aux = symToAux(sym)
12915 v.AddArg3(ptr, val, mem)
12916 return true
12917 }
12918 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
12919 // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
12920 for {
12921 off := auxIntToInt32(v.AuxInt)
12922 sym := auxToSym(v.Aux)
12923 ptr := v_0
12924 if v_1.Op != OpAMD64MOVLconst {
12925 break
12926 }
12927 c := auxIntToInt32(v_1.AuxInt)
12928 mem := v_2
12929 v.reset(OpAMD64MOVWstoreconst)
12930 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
12931 v.Aux = symToAux(sym)
12932 v.AddArg2(ptr, mem)
12933 return true
12934 }
12935 // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
12936 // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
12937 for {
12938 off := auxIntToInt32(v.AuxInt)
12939 sym := auxToSym(v.Aux)
12940 ptr := v_0
12941 if v_1.Op != OpAMD64MOVQconst {
12942 break
12943 }
12944 c := auxIntToInt64(v_1.AuxInt)
12945 mem := v_2
12946 v.reset(OpAMD64MOVWstoreconst)
12947 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
12948 v.Aux = symToAux(sym)
12949 v.AddArg2(ptr, mem)
12950 return true
12951 }
12952 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12953 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12954 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12955 for {
12956 off1 := auxIntToInt32(v.AuxInt)
12957 sym1 := auxToSym(v.Aux)
12958 if v_0.Op != OpAMD64LEAQ {
12959 break
12960 }
12961 off2 := auxIntToInt32(v_0.AuxInt)
12962 sym2 := auxToSym(v_0.Aux)
12963 base := v_0.Args[0]
12964 val := v_1
12965 mem := v_2
12966 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12967 break
12968 }
12969 v.reset(OpAMD64MOVWstore)
12970 v.AuxInt = int32ToAuxInt(off1 + off2)
12971 v.Aux = symToAux(mergeSym(sym1, sym2))
12972 v.AddArg3(base, val, mem)
12973 return true
12974 }
12975 // match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
12976 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
12977 // result: (MOVBEWstore [i] {s} p w mem)
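// Note: ROLWconst [8] is a byte swap of the low 16 bits, so the swap can be
// folded into a MOVBE store; GOAMD64 >= 3 (x86-64-v3) guarantees the MOVBE
// instruction is available.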
12978 for {
12979 i := auxIntToInt32(v.AuxInt)
12980 s := auxToSym(v.Aux)
12981 p := v_0
12982 x := v_1
12983 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
12984 break
12985 }
12986 w := x.Args[0]
12987 mem := v_2
12988 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12989 break
12990 }
12991 v.reset(OpAMD64MOVBEWstore)
12992 v.AuxInt = int32ToAuxInt(i)
12993 v.Aux = symToAux(s)
12994 v.AddArg3(p, w, mem)
12995 return true
12996 }
12997 return false
12998 }
12999 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
13000 v_1 := v.Args[1]
13001 v_0 := v.Args[0]
13002 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
13003 // cond: ValAndOff(sc).canAdd32(off)
13004 // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
13005 for {
13006 sc := auxIntToValAndOff(v.AuxInt)
13007 s := auxToSym(v.Aux)
13008 if v_0.Op != OpAMD64ADDQconst {
13009 break
13010 }
13011 off := auxIntToInt32(v_0.AuxInt)
13012 ptr := v_0.Args[0]
13013 mem := v_1
13014 if !(ValAndOff(sc).canAdd32(off)) {
13015 break
13016 }
13017 v.reset(OpAMD64MOVWstoreconst)
13018 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13019 v.Aux = symToAux(s)
13020 v.AddArg2(ptr, mem)
13021 return true
13022 }
13023 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
13024 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
13025 // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
13026 for {
13027 sc := auxIntToValAndOff(v.AuxInt)
13028 sym1 := auxToSym(v.Aux)
13029 if v_0.Op != OpAMD64LEAQ {
13030 break
13031 }
13032 off := auxIntToInt32(v_0.AuxInt)
13033 sym2 := auxToSym(v_0.Aux)
13034 ptr := v_0.Args[0]
13035 mem := v_1
13036 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
13037 break
13038 }
13039 v.reset(OpAMD64MOVWstoreconst)
13040 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13041 v.Aux = symToAux(mergeSym(sym1, sym2))
13042 v.AddArg2(ptr, mem)
13043 return true
13044 }
13045 return false
13046 }
13047 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
13048 v_1 := v.Args[1]
13049 v_0 := v.Args[0]
13050 // match: (MULL x (MOVLconst [c]))
13051 // result: (MULLconst [c] x)
13052 for {
13053 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13054 x := v_0
13055 if v_1.Op != OpAMD64MOVLconst {
13056 continue
13057 }
13058 c := auxIntToInt32(v_1.AuxInt)
13059 v.reset(OpAMD64MULLconst)
13060 v.AuxInt = int32ToAuxInt(c)
13061 v.AddArg(x)
13062 return true
13063 }
13064 break
13065 }
13066 return false
13067 }
13068 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
13069 v_0 := v.Args[0]
13070 b := v.Block
13071 // match: (MULLconst [c] (MULLconst [d] x))
13072 // result: (MULLconst [c * d] x)
13073 for {
13074 c := auxIntToInt32(v.AuxInt)
13075 if v_0.Op != OpAMD64MULLconst {
13076 break
13077 }
13078 d := auxIntToInt32(v_0.AuxInt)
13079 x := v_0.Args[0]
13080 v.reset(OpAMD64MULLconst)
13081 v.AuxInt = int32ToAuxInt(c * d)
13082 v.AddArg(x)
13083 return true
13084 }
13085 // match: (MULLconst [-9] x)
13086 // result: (NEGL (LEAL8 <v.Type> x x))
13087 for {
13088 if auxIntToInt32(v.AuxInt) != -9 {
13089 break
13090 }
13091 x := v_0
13092 v.reset(OpAMD64NEGL)
13093 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13094 v0.AddArg2(x, x)
13095 v.AddArg(v0)
13096 return true
13097 }
13098 // match: (MULLconst [-5] x)
13099 // result: (NEGL (LEAL4 <v.Type> x x))
13100 for {
13101 if auxIntToInt32(v.AuxInt) != -5 {
13102 break
13103 }
13104 x := v_0
13105 v.reset(OpAMD64NEGL)
13106 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13107 v0.AddArg2(x, x)
13108 v.AddArg(v0)
13109 return true
13110 }
13111
13112 // match: (MULLconst [-3] x)
13113 // result: (NEGL (LEAL2 <v.Type> x x))
13114 if auxIntToInt32(v.AuxInt) != -3 {
13115 break
13116 }
13117 x := v_0
13118 v.reset(OpAMD64NEGL)
13119 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13120 v0.AddArg2(x, x)
13121 v.AddArg(v0)
13122 return true
13123 }
13124 // match: (MULLconst [-1] x)
13125 // result: (NEGL x)
13126 for {
13127 if auxIntToInt32(v.AuxInt) != -1 {
13128 break
13129 }
13130 x := v_0
13131 v.reset(OpAMD64NEGL)
13132 v.AddArg(x)
13133 return true
13134 }
13135 // match: (MULLconst [ 0] _)
13136 // result: (MOVLconst [0])
13137 for {
13138 if auxIntToInt32(v.AuxInt) != 0 {
13139 break
13140 }
13141 v.reset(OpAMD64MOVLconst)
13142 v.AuxInt = int32ToAuxInt(0)
13143 return true
13144 }
13145 // match: (MULLconst [ 1] x)
13146 // result: x
13147 for {
13148 if auxIntToInt32(v.AuxInt) != 1 {
13149 break
13150 }
13151 x := v_0
13152 v.copyOf(x)
13153 return true
13154 }
13155 // match: (MULLconst [ 3] x)
13156 // result: (LEAL2 x x)
13157 for {
13158 if auxIntToInt32(v.AuxInt) != 3 {
13159 break
13160 }
13161 x := v_0
13162 v.reset(OpAMD64LEAL2)
13163 v.AddArg2(x, x)
13164 return true
13165 }
13166 // match: (MULLconst [ 5] x)
13167 // result: (LEAL4 x x)
13168 for {
13169 if auxIntToInt32(v.AuxInt) != 5 {
13170 break
13171 }
13172 x := v_0
13173 v.reset(OpAMD64LEAL4)
13174 v.AddArg2(x, x)
13175 return true
13176 }
13177 // match: (MULLconst [ 7] x)
13178 // result: (LEAL2 x (LEAL2 <v.Type> x x))
13179 for {
13180 if auxIntToInt32(v.AuxInt) != 7 {
13181 break
13182 }
13183 x := v_0
13184 v.reset(OpAMD64LEAL2)
13185 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13186 v0.AddArg2(x, x)
13187 v.AddArg2(x, v0)
13188 return true
13189 }
13190 // match: (MULLconst [ 9] x)
13191 // result: (LEAL8 x x)
13192 for {
13193 if auxIntToInt32(v.AuxInt) != 9 {
13194 break
13195 }
13196 x := v_0
13197 v.reset(OpAMD64LEAL8)
13198 v.AddArg2(x, x)
13199 return true
13200 }
13201 // match: (MULLconst [11] x)
13202 // result: (LEAL2 x (LEAL4 <v.Type> x x))
13203 for {
13204 if auxIntToInt32(v.AuxInt) != 11 {
13205 break
13206 }
13207 x := v_0
13208 v.reset(OpAMD64LEAL2)
13209 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13210 v0.AddArg2(x, x)
13211 v.AddArg2(x, v0)
13212 return true
13213 }
13214 // match: (MULLconst [13] x)
13215 // result: (LEAL4 x (LEAL2 <v.Type> x x))
13216 for {
13217 if auxIntToInt32(v.AuxInt) != 13 {
13218 break
13219 }
13220 x := v_0
13221 v.reset(OpAMD64LEAL4)
13222 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13223 v0.AddArg2(x, x)
13224 v.AddArg2(x, v0)
13225 return true
13226 }
13227 // match: (MULLconst [19] x)
13228 // result: (LEAL2 x (LEAL8 <v.Type> x x))
13229 for {
13230 if auxIntToInt32(v.AuxInt) != 19 {
13231 break
13232 }
13233 x := v_0
13234 v.reset(OpAMD64LEAL2)
13235 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13236 v0.AddArg2(x, x)
13237 v.AddArg2(x, v0)
13238 return true
13239 }
13240 // match: (MULLconst [21] x)
13241 // result: (LEAL4 x (LEAL4 <v.Type> x x))
13242 for {
13243 if auxIntToInt32(v.AuxInt) != 21 {
13244 break
13245 }
13246 x := v_0
13247 v.reset(OpAMD64LEAL4)
13248 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13249 v0.AddArg2(x, x)
13250 v.AddArg2(x, v0)
13251 return true
13252 }
13253 // match: (MULLconst [25] x)
13254 // result: (LEAL8 x (LEAL2 <v.Type> x x))
13255 for {
13256 if auxIntToInt32(v.AuxInt) != 25 {
13257 break
13258 }
13259 x := v_0
13260 v.reset(OpAMD64LEAL8)
13261 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13262 v0.AddArg2(x, x)
13263 v.AddArg2(x, v0)
13264 return true
13265 }
13266 // match: (MULLconst [27] x)
13267 // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
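// Note: 27 = 3*9, so 27*x is built from two LEAs with no multiply:
// LEAL2 x x computes x+2*x = 3*x, then LEAL8 y y computes y+8*y = 9*y,
// giving 9*(3*x) = 27*x. The 45 (5*9) and 81 (9*9) cases below use the
// same two-LEA pattern.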
13268 for {
13269 if auxIntToInt32(v.AuxInt) != 27 {
13270 break
13271 }
13272 x := v_0
13273 v.reset(OpAMD64LEAL8)
13274 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13275 v0.AddArg2(x, x)
13276 v.AddArg2(v0, v0)
13277 return true
13278 }
13279 // match: (MULLconst [37] x)
13280 // result: (LEAL4 x (LEAL8 <v.Type> x x))
13281 for {
13282 if auxIntToInt32(v.AuxInt) != 37 {
13283 break
13284 }
13285 x := v_0
13286 v.reset(OpAMD64LEAL4)
13287 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13288 v0.AddArg2(x, x)
13289 v.AddArg2(x, v0)
13290 return true
13291 }
13292 // match: (MULLconst [41] x)
13293 // result: (LEAL8 x (LEAL4 <v.Type> x x))
13294 for {
13295 if auxIntToInt32(v.AuxInt) != 41 {
13296 break
13297 }
13298 x := v_0
13299 v.reset(OpAMD64LEAL8)
13300 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13301 v0.AddArg2(x, x)
13302 v.AddArg2(x, v0)
13303 return true
13304 }
13305 // match: (MULLconst [45] x)
13306 // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
13307 for {
13308 if auxIntToInt32(v.AuxInt) != 45 {
13309 break
13310 }
13311 x := v_0
13312 v.reset(OpAMD64LEAL8)
13313 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13314 v0.AddArg2(x, x)
13315 v.AddArg2(v0, v0)
13316 return true
13317 }
13318 // match: (MULLconst [73] x)
13319 // result: (LEAL8 x (LEAL8 <v.Type> x x))
13320 for {
13321 if auxIntToInt32(v.AuxInt) != 73 {
13322 break
13323 }
13324 x := v_0
13325 v.reset(OpAMD64LEAL8)
13326 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13327 v0.AddArg2(x, x)
13328 v.AddArg2(x, v0)
13329 return true
13330 }
13331 // match: (MULLconst [81] x)
13332 // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
13333 for {
13334 if auxIntToInt32(v.AuxInt) != 81 {
13335 break
13336 }
13337 x := v_0
13338 v.reset(OpAMD64LEAL8)
13339 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13340 v0.AddArg2(x, x)
13341 v.AddArg2(v0, v0)
13342 return true
13343 }
13344 // match: (MULLconst [c] x)
13345 // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
13346 // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
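// Note: for c = 2^n - 1 the product is a shift and a subtract:
// c*x = (x<<n) - x. For example c=15 (n=4): 15*x = 16*x - x.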
13347 for {
13348 c := auxIntToInt32(v.AuxInt)
13349 x := v_0
13350 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
13351 break
13352 }
13353 v.reset(OpAMD64SUBL)
13354 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13355 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
13356 v0.AddArg(x)
13357 v.AddArg2(v0, x)
13358 return true
13359 }
13360 // match: (MULLconst [c] x)
13361 // cond: isPowerOfTwo32(c-1) && c >= 17
13362 // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
13363 for {
13364 c := auxIntToInt32(v.AuxInt)
13365 x := v_0
13366 if !(isPowerOfTwo32(c-1) && c >= 17) {
13367 break
13368 }
13369 v.reset(OpAMD64LEAL1)
13370 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13371 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
13372 v0.AddArg(x)
13373 v.AddArg2(v0, x)
13374 return true
13375 }
13376 // match: (MULLconst [c] x)
13377 // cond: isPowerOfTwo32(c-2) && c >= 34
13378 // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
13379 for {
13380 c := auxIntToInt32(v.AuxInt)
13381 x := v_0
13382 if !(isPowerOfTwo32(c-2) && c >= 34) {
13383 break
13384 }
13385 v.reset(OpAMD64LEAL2)
13386 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13387 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
13388 v0.AddArg(x)
13389 v.AddArg2(v0, x)
13390 return true
13391 }
13392 // match: (MULLconst [c] x)
13393 // cond: isPowerOfTwo32(c-4) && c >= 68
13394 // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
13395 for {
13396 c := auxIntToInt32(v.AuxInt)
13397 x := v_0
13398 if !(isPowerOfTwo32(c-4) && c >= 68) {
13399 break
13400 }
13401 v.reset(OpAMD64LEAL4)
13402 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13403 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
13404 v0.AddArg(x)
13405 v.AddArg2(v0, x)
13406 return true
13407 }
13408 // match: (MULLconst [c] x)
13409 // cond: isPowerOfTwo32(c-8) && c >= 136
13410 // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
13411 for {
13412 c := auxIntToInt32(v.AuxInt)
13413 x := v_0
13414 if !(isPowerOfTwo32(c-8) && c >= 136) {
13415 break
13416 }
13417 v.reset(OpAMD64LEAL8)
13418 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13419 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
13420 v0.AddArg(x)
13421 v.AddArg2(v0, x)
13422 return true
13423 }
13424 // match: (MULLconst [c] x)
13425 // cond: c%3 == 0 && isPowerOfTwo32(c/3)
13426 // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
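// Note: for c = 3*2^n the product is one LEA plus one shift:
// LEAL2 x x computes 3*x, and the SHLLconst supplies the 2^n factor.
// The c%5 and c%9 cases below do the same with LEAL4 (5*x) and LEAL8 (9*x).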
13427 for {
13428 c := auxIntToInt32(v.AuxInt)
13429 x := v_0
13430 if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
13431 break
13432 }
13433 v.reset(OpAMD64SHLLconst)
13434 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
13435 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13436 v0.AddArg2(x, x)
13437 v.AddArg(v0)
13438 return true
13439 }
13440 // match: (MULLconst [c] x)
13441 // cond: c%5 == 0 && isPowerOfTwo32(c/5)
13442 // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
13443 for {
13444 c := auxIntToInt32(v.AuxInt)
13445 x := v_0
13446 if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
13447 break
13448 }
13449 v.reset(OpAMD64SHLLconst)
13450 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
13451 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13452 v0.AddArg2(x, x)
13453 v.AddArg(v0)
13454 return true
13455 }
13456 // match: (MULLconst [c] x)
13457 // cond: c%9 == 0 && isPowerOfTwo32(c/9)
13458 // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
13459 for {
13460 c := auxIntToInt32(v.AuxInt)
13461 x := v_0
13462 if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
13463 break
13464 }
13465 v.reset(OpAMD64SHLLconst)
13466 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
13467 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13468 v0.AddArg2(x, x)
13469 v.AddArg(v0)
13470 return true
13471 }
13472 // match: (MULLconst [c] (MOVLconst [d]))
13473 // result: (MOVLconst [c*d])
13474 for {
13475 c := auxIntToInt32(v.AuxInt)
13476 if v_0.Op != OpAMD64MOVLconst {
13477 break
13478 }
13479 d := auxIntToInt32(v_0.AuxInt)
13480 v.reset(OpAMD64MOVLconst)
13481 v.AuxInt = int32ToAuxInt(c * d)
13482 return true
13483 }
13484 return false
13485 }
13486 func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
13487 v_1 := v.Args[1]
13488 v_0 := v.Args[0]
13489 // match: (MULQ x (MOVQconst [c]))
13490 // cond: is32Bit(c)
13491 // result: (MULQconst [int32(c)] x)
13492 for {
13493 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13494 x := v_0
13495 if v_1.Op != OpAMD64MOVQconst {
13496 continue
13497 }
13498 c := auxIntToInt64(v_1.AuxInt)
13499 if !(is32Bit(c)) {
13500 continue
13501 }
13502 v.reset(OpAMD64MULQconst)
13503 v.AuxInt = int32ToAuxInt(int32(c))
13504 v.AddArg(x)
13505 return true
13506 }
13507 break
13508 }
13509 return false
13510 }
13511 func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
13512 v_0 := v.Args[0]
13513 b := v.Block
13514 // match: (MULQconst [c] (MULQconst [d] x))
13515 // cond: is32Bit(int64(c)*int64(d))
13516 // result: (MULQconst [c * d] x)
13517 for {
13518 c := auxIntToInt32(v.AuxInt)
13519 if v_0.Op != OpAMD64MULQconst {
13520 break
13521 }
13522 d := auxIntToInt32(v_0.AuxInt)
13523 x := v_0.Args[0]
13524 if !(is32Bit(int64(c) * int64(d))) {
13525 break
13526 }
13527 v.reset(OpAMD64MULQconst)
13528 v.AuxInt = int32ToAuxInt(c * d)
13529 v.AddArg(x)
13530 return true
13531 }
13532 // match: (MULQconst [-9] x)
13533 // result: (NEGQ (LEAQ8 <v.Type> x x))
13534 for {
13535 if auxIntToInt32(v.AuxInt) != -9 {
13536 break
13537 }
13538 x := v_0
13539 v.reset(OpAMD64NEGQ)
13540 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13541 v0.AddArg2(x, x)
13542 v.AddArg(v0)
13543 return true
13544 }
13545 // match: (MULQconst [-5] x)
13546 // result: (NEGQ (LEAQ4 <v.Type> x x))
13547 for {
13548 if auxIntToInt32(v.AuxInt) != -5 {
13549 break
13550 }
13551 x := v_0
13552 v.reset(OpAMD64NEGQ)
13553 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13554 v0.AddArg2(x, x)
13555 v.AddArg(v0)
13556 return true
13557 }
13558 // match: (MULQconst [-3] x)
13559 // result: (NEGQ (LEAQ2 <v.Type> x x))
13560 for {
13561 if auxIntToInt32(v.AuxInt) != -3 {
13562 break
13563 }
13564 x := v_0
13565 v.reset(OpAMD64NEGQ)
13566 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13567 v0.AddArg2(x, x)
13568 v.AddArg(v0)
13569 return true
13570 }
13571 // match: (MULQconst [-1] x)
13572 // result: (NEGQ x)
13573 for {
13574 if auxIntToInt32(v.AuxInt) != -1 {
13575 break
13576 }
13577 x := v_0
13578 v.reset(OpAMD64NEGQ)
13579 v.AddArg(x)
13580 return true
13581 }
13582 // match: (MULQconst [0] _)
13583 // result: (MOVQconst [0])
13584 for {
13585 if auxIntToInt32(v.AuxInt) != 0 {
13586 break
13587 }
13588 v.reset(OpAMD64MOVQconst)
13589 v.AuxInt = int64ToAuxInt(0)
13590 return true
13591 }
13592 // match: (MULQconst [1] x)
13593 // result: x
13594 for {
13595 if auxIntToInt32(v.AuxInt) != 1 {
13596 break
13597 }
13598 x := v_0
13599 v.copyOf(x)
13600 return true
13601 }
13602 // match: (MULQconst [3] x)
13603 // result: (LEAQ2 x x)
13604 for {
13605 if auxIntToInt32(v.AuxInt) != 3 {
13606 break
13607 }
13608 x := v_0
13609 v.reset(OpAMD64LEAQ2)
13610 v.AddArg2(x, x)
13611 return true
13612 }
13613 // match: (MULQconst [5] x)
13614 // result: (LEAQ4 x x)
13615 for {
13616 if auxIntToInt32(v.AuxInt) != 5 {
13617 break
13618 }
13619 x := v_0
13620 v.reset(OpAMD64LEAQ4)
13621 v.AddArg2(x, x)
13622 return true
13623 }
13624 // match: (MULQconst [7] x)
13625 // result: (LEAQ2 x (LEAQ2 <v.Type> x x))
13626 for {
13627 if auxIntToInt32(v.AuxInt) != 7 {
13628 break
13629 }
13630 x := v_0
13631 v.reset(OpAMD64LEAQ2)
13632 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13633 v0.AddArg2(x, x)
13634 v.AddArg2(x, v0)
13635 return true
13636 }
13637 // match: (MULQconst [9] x)
13638 // result: (LEAQ8 x x)
13639 for {
13640 if auxIntToInt32(v.AuxInt) != 9 {
13641 break
13642 }
13643 x := v_0
13644 v.reset(OpAMD64LEAQ8)
13645 v.AddArg2(x, x)
13646 return true
13647 }
13648 // match: (MULQconst [11] x)
13649 // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
13650 for {
13651 if auxIntToInt32(v.AuxInt) != 11 {
13652 break
13653 }
13654 x := v_0
13655 v.reset(OpAMD64LEAQ2)
13656 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13657 v0.AddArg2(x, x)
13658 v.AddArg2(x, v0)
13659 return true
13660 }
13661 // match: (MULQconst [13] x)
13662 // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
13663 for {
13664 if auxIntToInt32(v.AuxInt) != 13 {
13665 break
13666 }
13667 x := v_0
13668 v.reset(OpAMD64LEAQ4)
13669 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13670 v0.AddArg2(x, x)
13671 v.AddArg2(x, v0)
13672 return true
13673 }
13674 // match: (MULQconst [19] x)
13675 // result: (LEAQ2 x (LEAQ8 <v.Type> x x))
13676 for {
13677 if auxIntToInt32(v.AuxInt) != 19 {
13678 break
13679 }
13680 x := v_0
13681 v.reset(OpAMD64LEAQ2)
13682 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13683 v0.AddArg2(x, x)
13684 v.AddArg2(x, v0)
13685 return true
13686 }
13687 // match: (MULQconst [21] x)
13688 // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
13689 for {
13690 if auxIntToInt32(v.AuxInt) != 21 {
13691 break
13692 }
13693 x := v_0
13694 v.reset(OpAMD64LEAQ4)
13695 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13696 v0.AddArg2(x, x)
13697 v.AddArg2(x, v0)
13698 return true
13699 }
13700 // match: (MULQconst [25] x)
13701 // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
13702 for {
13703 if auxIntToInt32(v.AuxInt) != 25 {
13704 break
13705 }
13706 x := v_0
13707 v.reset(OpAMD64LEAQ8)
13708 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13709 v0.AddArg2(x, x)
13710 v.AddArg2(x, v0)
13711 return true
13712 }
13713 // match: (MULQconst [27] x)
13714 // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
13715 for {
13716 if auxIntToInt32(v.AuxInt) != 27 {
13717 break
13718 }
13719 x := v_0
13720 v.reset(OpAMD64LEAQ8)
13721 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13722 v0.AddArg2(x, x)
13723 v.AddArg2(v0, v0)
13724 return true
13725 }
13726 // match: (MULQconst [37] x)
13727 // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
13728 for {
13729 if auxIntToInt32(v.AuxInt) != 37 {
13730 break
13731 }
13732 x := v_0
13733 v.reset(OpAMD64LEAQ4)
13734 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13735 v0.AddArg2(x, x)
13736 v.AddArg2(x, v0)
13737 return true
13738 }
13739 // match: (MULQconst [41] x)
13740 // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
13741 for {
13742 if auxIntToInt32(v.AuxInt) != 41 {
13743 break
13744 }
13745 x := v_0
13746 v.reset(OpAMD64LEAQ8)
13747 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13748 v0.AddArg2(x, x)
13749 v.AddArg2(x, v0)
13750 return true
13751 }
13752 // match: (MULQconst [45] x)
13753 // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
13754 for {
13755 if auxIntToInt32(v.AuxInt) != 45 {
13756 break
13757 }
13758 x := v_0
13759 v.reset(OpAMD64LEAQ8)
13760 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13761 v0.AddArg2(x, x)
13762 v.AddArg2(v0, v0)
13763 return true
13764 }
13765 // match: (MULQconst [73] x)
13766 // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
13767 for {
13768 if auxIntToInt32(v.AuxInt) != 73 {
13769 break
13770 }
13771 x := v_0
13772 v.reset(OpAMD64LEAQ8)
13773 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13774 v0.AddArg2(x, x)
13775 v.AddArg2(x, v0)
13776 return true
13777 }
13778 // match: (MULQconst [81] x)
13779 // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
13780 for {
13781 if auxIntToInt32(v.AuxInt) != 81 {
13782 break
13783 }
13784 x := v_0
13785 v.reset(OpAMD64LEAQ8)
13786 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13787 v0.AddArg2(x, x)
13788 v.AddArg2(v0, v0)
13789 return true
13790 }
13791 // match: (MULQconst [c] x)
13792 // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
13793 // result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
13794 for {
13795 c := auxIntToInt32(v.AuxInt)
13796 x := v_0
13797 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
13798 break
13799 }
13800 v.reset(OpAMD64SUBQ)
13801 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13802 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
13803 v0.AddArg(x)
13804 v.AddArg2(v0, x)
13805 return true
13806 }
13807 // match: (MULQconst [c] x)
13808 // cond: isPowerOfTwo32(c-1) && c >= 17
13809 // result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
13810 for {
13811 c := auxIntToInt32(v.AuxInt)
13812 x := v_0
13813 if !(isPowerOfTwo32(c-1) && c >= 17) {
13814 break
13815 }
13816 v.reset(OpAMD64LEAQ1)
13817 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13818 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
13819 v0.AddArg(x)
13820 v.AddArg2(v0, x)
13821 return true
13822 }
13823 // match: (MULQconst [c] x)
13824 // cond: isPowerOfTwo32(c-2) && c >= 34
13825 // result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
13826 for {
13827 c := auxIntToInt32(v.AuxInt)
13828 x := v_0
13829 if !(isPowerOfTwo32(c-2) && c >= 34) {
13830 break
13831 }
13832 v.reset(OpAMD64LEAQ2)
13833 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13834 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
13835 v0.AddArg(x)
13836 v.AddArg2(v0, x)
13837 return true
13838 }
13839 // match: (MULQconst [c] x)
13840 // cond: isPowerOfTwo32(c-4) && c >= 68
13841 // result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
13842 for {
13843 c := auxIntToInt32(v.AuxInt)
13844 x := v_0
13845 if !(isPowerOfTwo32(c-4) && c >= 68) {
13846 break
13847 }
13848 v.reset(OpAMD64LEAQ4)
13849 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13850 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
13851 v0.AddArg(x)
13852 v.AddArg2(v0, x)
13853 return true
13854 }
13855 // match: (MULQconst [c] x)
13856 // cond: isPowerOfTwo32(c-8) && c >= 136
13857 // result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
13858 for {
13859 c := auxIntToInt32(v.AuxInt)
13860 x := v_0
13861 if !(isPowerOfTwo32(c-8) && c >= 136) {
13862 break
13863 }
13864 v.reset(OpAMD64LEAQ8)
13865 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13866 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
13867 v0.AddArg(x)
13868 v.AddArg2(v0, x)
13869 return true
13870 }
13871 // match: (MULQconst [c] x)
13872 // cond: c%3 == 0 && isPowerOfTwo32(c/3)
13873 // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
13874 for {
13875 c := auxIntToInt32(v.AuxInt)
13876 x := v_0
13877 if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
13878 break
13879 }
13880 v.reset(OpAMD64SHLQconst)
13881 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
13882 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13883 v0.AddArg2(x, x)
13884 v.AddArg(v0)
13885 return true
13886 }
13887 // match: (MULQconst [c] x)
13888 // cond: c%5 == 0 && isPowerOfTwo32(c/5)
13889 // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
13890 for {
13891 c := auxIntToInt32(v.AuxInt)
13892 x := v_0
13893 if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
13894 break
13895 }
13896 v.reset(OpAMD64SHLQconst)
13897 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
13898 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13899 v0.AddArg2(x, x)
13900 v.AddArg(v0)
13901 return true
13902 }
13903 // match: (MULQconst [c] x)
13904 // cond: c%9 == 0 && isPowerOfTwo32(c/9)
13905 // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
13906 for {
13907 c := auxIntToInt32(v.AuxInt)
13908 x := v_0
13909 if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
13910 break
13911 }
13912 v.reset(OpAMD64SHLQconst)
13913 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
13914 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13915 v0.AddArg2(x, x)
13916 v.AddArg(v0)
13917 return true
13918 }
13919 // match: (MULQconst [c] (MOVQconst [d]))
13920 // result: (MOVQconst [int64(c)*d])
13921 for {
13922 c := auxIntToInt32(v.AuxInt)
13923 if v_0.Op != OpAMD64MOVQconst {
13924 break
13925 }
13926 d := auxIntToInt64(v_0.AuxInt)
13927 v.reset(OpAMD64MOVQconst)
13928 v.AuxInt = int64ToAuxInt(int64(c) * d)
13929 return true
13930 }
13931 // match: (MULQconst [c] (NEGQ x))
13932 // cond: c != -(1<<31)
13933 // result: (MULQconst [-c] x)
13934 for {
13935 c := auxIntToInt32(v.AuxInt)
13936 if v_0.Op != OpAMD64NEGQ {
13937 break
13938 }
13939 x := v_0.Args[0]
13940 if !(c != -(1 << 31)) {
13941 break
13942 }
13943 v.reset(OpAMD64MULQconst)
13944 v.AuxInt = int32ToAuxInt(-c)
13945 v.AddArg(x)
13946 return true
13947 }
13948 return false
13949 }
13950 func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
13951 v_1 := v.Args[1]
13952 v_0 := v.Args[0]
13953 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
13954 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
13955 // result: (MULSDload x [off] {sym} ptr mem)
13956 for {
13957 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13958 x := v_0
13959 l := v_1
13960 if l.Op != OpAMD64MOVSDload {
13961 continue
13962 }
13963 off := auxIntToInt32(l.AuxInt)
13964 sym := auxToSym(l.Aux)
13965 mem := l.Args[1]
13966 ptr := l.Args[0]
13967 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
13968 continue
13969 }
13970 v.reset(OpAMD64MULSDload)
13971 v.AuxInt = int32ToAuxInt(off)
13972 v.Aux = symToAux(sym)
13973 v.AddArg3(x, ptr, mem)
13974 return true
13975 }
13976 break
13977 }
13978 return false
13979 }
13980 func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
13981 v_2 := v.Args[2]
13982 v_1 := v.Args[1]
13983 v_0 := v.Args[0]
13984 b := v.Block
13985 typ := &b.Func.Config.Types
13986 // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
13987 // cond: is32Bit(int64(off1)+int64(off2))
13988 // result: (MULSDload [off1+off2] {sym} val base mem)
13989 for {
13990 off1 := auxIntToInt32(v.AuxInt)
13991 sym := auxToSym(v.Aux)
13992 val := v_0
13993 if v_1.Op != OpAMD64ADDQconst {
13994 break
13995 }
13996 off2 := auxIntToInt32(v_1.AuxInt)
13997 base := v_1.Args[0]
13998 mem := v_2
13999 if !(is32Bit(int64(off1) + int64(off2))) {
14000 break
14001 }
14002 v.reset(OpAMD64MULSDload)
14003 v.AuxInt = int32ToAuxInt(off1 + off2)
14004 v.Aux = symToAux(sym)
14005 v.AddArg3(val, base, mem)
14006 return true
14007 }
14008 // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
14009 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14010 // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
14011 for {
14012 off1 := auxIntToInt32(v.AuxInt)
14013 sym1 := auxToSym(v.Aux)
14014 val := v_0
14015 if v_1.Op != OpAMD64LEAQ {
14016 break
14017 }
14018 off2 := auxIntToInt32(v_1.AuxInt)
14019 sym2 := auxToSym(v_1.Aux)
14020 base := v_1.Args[0]
14021 mem := v_2
14022 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14023 break
14024 }
14025 v.reset(OpAMD64MULSDload)
14026 v.AuxInt = int32ToAuxInt(off1 + off2)
14027 v.Aux = symToAux(mergeSym(sym1, sym2))
14028 v.AddArg3(val, base, mem)
14029 return true
14030 }
14031 // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
14032 // result: (MULSD x (MOVQi2f y))
14033 for {
14034 off := auxIntToInt32(v.AuxInt)
14035 sym := auxToSym(v.Aux)
14036 x := v_0
14037 ptr := v_1
14038 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14039 break
14040 }
14041 y := v_2.Args[1]
14042 if ptr != v_2.Args[0] {
14043 break
14044 }
14045 v.reset(OpAMD64MULSD)
14046 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
14047 v0.AddArg(y)
14048 v.AddArg2(x, v0)
14049 return true
14050 }
14051 return false
14052 }
14053 func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
14054 v_1 := v.Args[1]
14055 v_0 := v.Args[0]
14056 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
14057 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
14058 // result: (MULSSload x [off] {sym} ptr mem)
14059 for {
14060 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14061 x := v_0
14062 l := v_1
14063 if l.Op != OpAMD64MOVSSload {
14064 continue
14065 }
14066 off := auxIntToInt32(l.AuxInt)
14067 sym := auxToSym(l.Aux)
14068 mem := l.Args[1]
14069 ptr := l.Args[0]
14070 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14071 continue
14072 }
14073 v.reset(OpAMD64MULSSload)
14074 v.AuxInt = int32ToAuxInt(off)
14075 v.Aux = symToAux(sym)
14076 v.AddArg3(x, ptr, mem)
14077 return true
14078 }
14079 break
14080 }
14081 return false
14082 }
14083 func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
14084 v_2 := v.Args[2]
14085 v_1 := v.Args[1]
14086 v_0 := v.Args[0]
14087 b := v.Block
14088 typ := &b.Func.Config.Types
14089 // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
14090 // cond: is32Bit(int64(off1)+int64(off2))
14091 // result: (MULSSload [off1+off2] {sym} val base mem)
14092 for {
14093 off1 := auxIntToInt32(v.AuxInt)
14094 sym := auxToSym(v.Aux)
14095 val := v_0
14096 if v_1.Op != OpAMD64ADDQconst {
14097 break
14098 }
14099 off2 := auxIntToInt32(v_1.AuxInt)
14100 base := v_1.Args[0]
14101 mem := v_2
14102 if !(is32Bit(int64(off1) + int64(off2))) {
14103 break
14104 }
14105 v.reset(OpAMD64MULSSload)
14106 v.AuxInt = int32ToAuxInt(off1 + off2)
14107 v.Aux = symToAux(sym)
14108 v.AddArg3(val, base, mem)
14109 return true
14110 }
14111 // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
14112 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14113 // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
14114 for {
14115 off1 := auxIntToInt32(v.AuxInt)
14116 sym1 := auxToSym(v.Aux)
14117 val := v_0
14118 if v_1.Op != OpAMD64LEAQ {
14119 break
14120 }
14121 off2 := auxIntToInt32(v_1.AuxInt)
14122 sym2 := auxToSym(v_1.Aux)
14123 base := v_1.Args[0]
14124 mem := v_2
14125 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14126 break
14127 }
14128 v.reset(OpAMD64MULSSload)
14129 v.AuxInt = int32ToAuxInt(off1 + off2)
14130 v.Aux = symToAux(mergeSym(sym1, sym2))
14131 v.AddArg3(val, base, mem)
14132 return true
14133 }
14134 // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
14135 // result: (MULSS x (MOVLi2f y))
14136 for {
14137 off := auxIntToInt32(v.AuxInt)
14138 sym := auxToSym(v.Aux)
14139 x := v_0
14140 ptr := v_1
14141 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14142 break
14143 }
14144 y := v_2.Args[1]
14145 if ptr != v_2.Args[0] {
14146 break
14147 }
14148 v.reset(OpAMD64MULSS)
14149 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
14150 v0.AddArg(y)
14151 v.AddArg2(x, v0)
14152 return true
14153 }
14154 return false
14155 }
14156 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
14157 v_0 := v.Args[0]
14158 // match: (NEGL (NEGL x))
14159 // result: x
14160 for {
14161 if v_0.Op != OpAMD64NEGL {
14162 break
14163 }
14164 x := v_0.Args[0]
14165 v.copyOf(x)
14166 return true
14167 }
14168 // match: (NEGL s:(SUBL x y))
14169 // cond: s.Uses == 1
14170 // result: (SUBL y x)
14171 for {
14172 s := v_0
14173 if s.Op != OpAMD64SUBL {
14174 break
14175 }
14176 y := s.Args[1]
14177 x := s.Args[0]
14178 if !(s.Uses == 1) {
14179 break
14180 }
14181 v.reset(OpAMD64SUBL)
14182 v.AddArg2(y, x)
14183 return true
14184 }
14185 // match: (NEGL (MOVLconst [c]))
14186 // result: (MOVLconst [-c])
14187 for {
14188 if v_0.Op != OpAMD64MOVLconst {
14189 break
14190 }
14191 c := auxIntToInt32(v_0.AuxInt)
14192 v.reset(OpAMD64MOVLconst)
14193 v.AuxInt = int32ToAuxInt(-c)
14194 return true
14195 }
14196 return false
14197 }
14198 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
14199 v_0 := v.Args[0]
14200 // match: (NEGQ (NEGQ x))
14201 // result: x
14202 for {
14203 if v_0.Op != OpAMD64NEGQ {
14204 break
14205 }
14206 x := v_0.Args[0]
14207 v.copyOf(x)
14208 return true
14209 }
14210 // match: (NEGQ s:(SUBQ x y))
14211 // cond: s.Uses == 1
14212 // result: (SUBQ y x)
14213 for {
14214 s := v_0
14215 if s.Op != OpAMD64SUBQ {
14216 break
14217 }
14218 y := s.Args[1]
14219 x := s.Args[0]
14220 if !(s.Uses == 1) {
14221 break
14222 }
14223 v.reset(OpAMD64SUBQ)
14224 v.AddArg2(y, x)
14225 return true
14226 }
14227 // match: (NEGQ (MOVQconst [c]))
14228 // result: (MOVQconst [-c])
14229 for {
14230 if v_0.Op != OpAMD64MOVQconst {
14231 break
14232 }
14233 c := auxIntToInt64(v_0.AuxInt)
14234 v.reset(OpAMD64MOVQconst)
14235 v.AuxInt = int64ToAuxInt(-c)
14236 return true
14237 }
14238 // match: (NEGQ (ADDQconst [c] (NEGQ x)))
14239 // cond: c != -(1<<31)
14240 // result: (ADDQconst [-c] x)
14241 for {
14242 if v_0.Op != OpAMD64ADDQconst {
14243 break
14244 }
14245 c := auxIntToInt32(v_0.AuxInt)
14246 v_0_0 := v_0.Args[0]
14247 if v_0_0.Op != OpAMD64NEGQ {
14248 break
14249 }
14250 x := v_0_0.Args[0]
14251 if !(c != -(1 << 31)) {
14252 break
14253 }
14254 v.reset(OpAMD64ADDQconst)
14255 v.AuxInt = int32ToAuxInt(-c)
14256 v.AddArg(x)
14257 return true
14258 }
14259 return false
14260 }
14261 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
14262 v_0 := v.Args[0]
14263 // match: (NOTL (MOVLconst [c]))
14264 // result: (MOVLconst [^c])
14265 for {
14266 if v_0.Op != OpAMD64MOVLconst {
14267 break
14268 }
14269 c := auxIntToInt32(v_0.AuxInt)
14270 v.reset(OpAMD64MOVLconst)
14271 v.AuxInt = int32ToAuxInt(^c)
14272 return true
14273 }
14274 return false
14275 }
14276 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
14277 v_0 := v.Args[0]
14278 // match: (NOTQ (MOVQconst [c]))
14279 // result: (MOVQconst [^c])
14280 for {
14281 if v_0.Op != OpAMD64MOVQconst {
14282 break
14283 }
14284 c := auxIntToInt64(v_0.AuxInt)
14285 v.reset(OpAMD64MOVQconst)
14286 v.AuxInt = int64ToAuxInt(^c)
14287 return true
14288 }
14289 return false
14290 }
14291 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
14292 v_1 := v.Args[1]
14293 v_0 := v.Args[0]
14294 // match: (ORL (SHLL (MOVLconst [1]) y) x)
14295 // result: (BTSL x y)
14296 for {
14297 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14298 if v_0.Op != OpAMD64SHLL {
14299 continue
14300 }
14301 y := v_0.Args[1]
14302 v_0_0 := v_0.Args[0]
14303 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
14304 continue
14305 }
14306 x := v_1
14307 v.reset(OpAMD64BTSL)
14308 v.AddArg2(x, y)
14309 return true
14310 }
14311 break
14312 }
14313 // match: (ORL x (MOVLconst [c]))
14314 // result: (ORLconst [c] x)
14315 for {
14316 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14317 x := v_0
14318 if v_1.Op != OpAMD64MOVLconst {
14319 continue
14320 }
14321 c := auxIntToInt32(v_1.AuxInt)
14322 v.reset(OpAMD64ORLconst)
14323 v.AuxInt = int32ToAuxInt(c)
14324 v.AddArg(x)
14325 return true
14326 }
14327 break
14328 }
14329 // match: (ORL x x)
14330 // result: x
14331 for {
14332 x := v_0
14333 if x != v_1 {
14334 break
14335 }
14336 v.copyOf(x)
14337 return true
14338 }
14339 // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
14340 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
14341 // result: (ORLload x [off] {sym} ptr mem)
14342 for {
14343 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14344 x := v_0
14345 l := v_1
14346 if l.Op != OpAMD64MOVLload {
14347 continue
14348 }
14349 off := auxIntToInt32(l.AuxInt)
14350 sym := auxToSym(l.Aux)
14351 mem := l.Args[1]
14352 ptr := l.Args[0]
14353 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14354 continue
14355 }
14356 v.reset(OpAMD64ORLload)
14357 v.AuxInt = int32ToAuxInt(off)
14358 v.Aux = symToAux(sym)
14359 v.AddArg3(x, ptr, mem)
14360 return true
14361 }
14362 break
14363 }
14364 return false
14365 }
14366 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
14367 v_0 := v.Args[0]
14368 // match: (ORLconst [c] (ORLconst [d] x))
14369 // result: (ORLconst [c | d] x)
14370 for {
14371 c := auxIntToInt32(v.AuxInt)
14372 if v_0.Op != OpAMD64ORLconst {
14373 break
14374 }
14375 d := auxIntToInt32(v_0.AuxInt)
14376 x := v_0.Args[0]
14377 v.reset(OpAMD64ORLconst)
14378 v.AuxInt = int32ToAuxInt(c | d)
14379 v.AddArg(x)
14380 return true
14381 }
14382 // match: (ORLconst [c] x)
14383 // cond: c == 0
14384 // result: x
14385 for {
14386 c := auxIntToInt32(v.AuxInt)
14387 x := v_0
14388 if !(c == 0) {
14389 break
14390 }
14391 v.copyOf(x)
14392 return true
14393 }
14394 // match: (ORLconst [c] _)
14395 // cond: c == -1
14396 // result: (MOVLconst [-1])
14397 for {
14398 c := auxIntToInt32(v.AuxInt)
14399 if !(c == -1) {
14400 break
14401 }
14402 v.reset(OpAMD64MOVLconst)
14403 v.AuxInt = int32ToAuxInt(-1)
14404 return true
14405 }
14406 // match: (ORLconst [c] (MOVLconst [d]))
14407 // result: (MOVLconst [c|d])
14408 for {
14409 c := auxIntToInt32(v.AuxInt)
14410 if v_0.Op != OpAMD64MOVLconst {
14411 break
14412 }
14413 d := auxIntToInt32(v_0.AuxInt)
14414 v.reset(OpAMD64MOVLconst)
14415 v.AuxInt = int32ToAuxInt(c | d)
14416 return true
14417 }
14418 return false
14419 }
14420 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
14421 v_1 := v.Args[1]
14422 v_0 := v.Args[0]
14423 // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
14424 // cond: ValAndOff(valoff1).canAdd32(off2)
14425 // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
14426 for {
14427 valoff1 := auxIntToValAndOff(v.AuxInt)
14428 sym := auxToSym(v.Aux)
14429 if v_0.Op != OpAMD64ADDQconst {
14430 break
14431 }
14432 off2 := auxIntToInt32(v_0.AuxInt)
14433 base := v_0.Args[0]
14434 mem := v_1
14435 if !(ValAndOff(valoff1).canAdd32(off2)) {
14436 break
14437 }
14438 v.reset(OpAMD64ORLconstmodify)
14439 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14440 v.Aux = symToAux(sym)
14441 v.AddArg2(base, mem)
14442 return true
14443 }
14444 // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
14445 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
14446 // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
14447 for {
14448 valoff1 := auxIntToValAndOff(v.AuxInt)
14449 sym1 := auxToSym(v.Aux)
14450 if v_0.Op != OpAMD64LEAQ {
14451 break
14452 }
14453 off2 := auxIntToInt32(v_0.AuxInt)
14454 sym2 := auxToSym(v_0.Aux)
14455 base := v_0.Args[0]
14456 mem := v_1
14457 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14458 break
14459 }
14460 v.reset(OpAMD64ORLconstmodify)
14461 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14462 v.Aux = symToAux(mergeSym(sym1, sym2))
14463 v.AddArg2(base, mem)
14464 return true
14465 }
14466 return false
14467 }
14468 func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
14469 v_2 := v.Args[2]
14470 v_1 := v.Args[1]
14471 v_0 := v.Args[0]
14472 b := v.Block
14473 typ := &b.Func.Config.Types
14474 // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
14475 // cond: is32Bit(int64(off1)+int64(off2))
14476 // result: (ORLload [off1+off2] {sym} val base mem)
14477 for {
14478 off1 := auxIntToInt32(v.AuxInt)
14479 sym := auxToSym(v.Aux)
14480 val := v_0
14481 if v_1.Op != OpAMD64ADDQconst {
14482 break
14483 }
14484 off2 := auxIntToInt32(v_1.AuxInt)
14485 base := v_1.Args[0]
14486 mem := v_2
14487 if !(is32Bit(int64(off1) + int64(off2))) {
14488 break
14489 }
14490 v.reset(OpAMD64ORLload)
14491 v.AuxInt = int32ToAuxInt(off1 + off2)
14492 v.Aux = symToAux(sym)
14493 v.AddArg3(val, base, mem)
14494 return true
14495 }
14496 // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
14497 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14498 // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
14499 for {
14500 off1 := auxIntToInt32(v.AuxInt)
14501 sym1 := auxToSym(v.Aux)
14502 val := v_0
14503 if v_1.Op != OpAMD64LEAQ {
14504 break
14505 }
14506 off2 := auxIntToInt32(v_1.AuxInt)
14507 sym2 := auxToSym(v_1.Aux)
14508 base := v_1.Args[0]
14509 mem := v_2
14510 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14511 break
14512 }
14513 v.reset(OpAMD64ORLload)
14514 v.AuxInt = int32ToAuxInt(off1 + off2)
14515 v.Aux = symToAux(mergeSym(sym1, sym2))
14516 v.AddArg3(val, base, mem)
14517 return true
14518 }
14519 // match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
14520 // result: (ORL x (MOVLf2i y))
14521 for {
14522 off := auxIntToInt32(v.AuxInt)
14523 sym := auxToSym(v.Aux)
14524 x := v_0
14525 ptr := v_1
14526 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14527 break
14528 }
14529 y := v_2.Args[1]
14530 if ptr != v_2.Args[0] {
14531 break
14532 }
14533 v.reset(OpAMD64ORL)
14534 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
14535 v0.AddArg(y)
14536 v.AddArg2(x, v0)
14537 return true
14538 }
14539 return false
14540 }
14541 func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
14542 v_2 := v.Args[2]
14543 v_1 := v.Args[1]
14544 v_0 := v.Args[0]
14545 // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
14546 // cond: is32Bit(int64(off1)+int64(off2))
14547 // result: (ORLmodify [off1+off2] {sym} base val mem)
14548 for {
14549 off1 := auxIntToInt32(v.AuxInt)
14550 sym := auxToSym(v.Aux)
14551 if v_0.Op != OpAMD64ADDQconst {
14552 break
14553 }
14554 off2 := auxIntToInt32(v_0.AuxInt)
14555 base := v_0.Args[0]
14556 val := v_1
14557 mem := v_2
14558 if !(is32Bit(int64(off1) + int64(off2))) {
14559 break
14560 }
14561 v.reset(OpAMD64ORLmodify)
14562 v.AuxInt = int32ToAuxInt(off1 + off2)
14563 v.Aux = symToAux(sym)
14564 v.AddArg3(base, val, mem)
14565 return true
14566 }
14567 // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
14568 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14569 // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
14570 for {
14571 off1 := auxIntToInt32(v.AuxInt)
14572 sym1 := auxToSym(v.Aux)
14573 if v_0.Op != OpAMD64LEAQ {
14574 break
14575 }
14576 off2 := auxIntToInt32(v_0.AuxInt)
14577 sym2 := auxToSym(v_0.Aux)
14578 base := v_0.Args[0]
14579 val := v_1
14580 mem := v_2
14581 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14582 break
14583 }
14584 v.reset(OpAMD64ORLmodify)
14585 v.AuxInt = int32ToAuxInt(off1 + off2)
14586 v.Aux = symToAux(mergeSym(sym1, sym2))
14587 v.AddArg3(base, val, mem)
14588 return true
14589 }
14590 return false
14591 }
14592 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
14593 v_1 := v.Args[1]
14594 v_0 := v.Args[0]
14595 // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
14596 // result: (BTSQ x y)
14597 for {
14598 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14599 if v_0.Op != OpAMD64SHLQ {
14600 continue
14601 }
14602 y := v_0.Args[1]
14603 v_0_0 := v_0.Args[0]
14604 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
14605 continue
14606 }
14607 x := v_1
14608 v.reset(OpAMD64BTSQ)
14609 v.AddArg2(x, y)
14610 return true
14611 }
14612 break
14613 }
14614 // match: (ORQ (MOVQconst [c]) x)
14615 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
14616 // result: (BTSQconst [int8(log64(c))] x)
14617 for {
14618 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14619 if v_0.Op != OpAMD64MOVQconst {
14620 continue
14621 }
14622 c := auxIntToInt64(v_0.AuxInt)
14623 x := v_1
14624 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
14625 continue
14626 }
14627 v.reset(OpAMD64BTSQconst)
14628 v.AuxInt = int8ToAuxInt(int8(log64(c)))
14629 v.AddArg(x)
14630 return true
14631 }
14632 break
14633 }
14634 // match: (ORQ x (MOVQconst [c]))
14635 // cond: is32Bit(c)
14636 // result: (ORQconst [int32(c)] x)
14637 for {
14638 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14639 x := v_0
14640 if v_1.Op != OpAMD64MOVQconst {
14641 continue
14642 }
14643 c := auxIntToInt64(v_1.AuxInt)
14644 if !(is32Bit(c)) {
14645 continue
14646 }
14647 v.reset(OpAMD64ORQconst)
14648 v.AuxInt = int32ToAuxInt(int32(c))
14649 v.AddArg(x)
14650 return true
14651 }
14652 break
14653 }
14654 // match: (ORQ x (MOVLconst [c]))
14655 // result: (ORQconst [c] x)
14656 for {
14657 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14658 x := v_0
14659 if v_1.Op != OpAMD64MOVLconst {
14660 continue
14661 }
14662 c := auxIntToInt32(v_1.AuxInt)
14663 v.reset(OpAMD64ORQconst)
14664 v.AuxInt = int32ToAuxInt(c)
14665 v.AddArg(x)
14666 return true
14667 }
14668 break
14669 }
14670 // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
14671 // result: (SHRDQ lo hi bits)
14672 for {
14673 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14674 if v_0.Op != OpAMD64SHRQ {
14675 continue
14676 }
14677 bits := v_0.Args[1]
14678 lo := v_0.Args[0]
14679 if v_1.Op != OpAMD64SHLQ {
14680 continue
14681 }
14682 _ = v_1.Args[1]
14683 hi := v_1.Args[0]
14684 v_1_1 := v_1.Args[1]
14685 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14686 continue
14687 }
14688 v.reset(OpAMD64SHRDQ)
14689 v.AddArg3(lo, hi, bits)
14690 return true
14691 }
14692 break
14693 }
14694 // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
14695 // result: (SHLDQ lo hi bits)
14696 for {
14697 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14698 if v_0.Op != OpAMD64SHLQ {
14699 continue
14700 }
14701 bits := v_0.Args[1]
14702 lo := v_0.Args[0]
14703 if v_1.Op != OpAMD64SHRQ {
14704 continue
14705 }
14706 _ = v_1.Args[1]
14707 hi := v_1.Args[0]
14708 v_1_1 := v_1.Args[1]
14709 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14710 continue
14711 }
14712 v.reset(OpAMD64SHLDQ)
14713 v.AddArg3(lo, hi, bits)
14714 return true
14715 }
14716 break
14717 }
14718 // match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
14719 // result: (SHRDQ lo hi bits)
14720 for {
14721 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14722 if v_0.Op != OpAMD64SHRXQ {
14723 continue
14724 }
14725 bits := v_0.Args[1]
14726 lo := v_0.Args[0]
14727 if v_1.Op != OpAMD64SHLXQ {
14728 continue
14729 }
14730 _ = v_1.Args[1]
14731 hi := v_1.Args[0]
14732 v_1_1 := v_1.Args[1]
14733 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14734 continue
14735 }
14736 v.reset(OpAMD64SHRDQ)
14737 v.AddArg3(lo, hi, bits)
14738 return true
14739 }
14740 break
14741 }
14742 // match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
14743 // result: (SHLDQ lo hi bits)
14744 for {
14745 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14746 if v_0.Op != OpAMD64SHLXQ {
14747 continue
14748 }
14749 bits := v_0.Args[1]
14750 lo := v_0.Args[0]
14751 if v_1.Op != OpAMD64SHRXQ {
14752 continue
14753 }
14754 _ = v_1.Args[1]
14755 hi := v_1.Args[0]
14756 v_1_1 := v_1.Args[1]
14757 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14758 continue
14759 }
14760 v.reset(OpAMD64SHLDQ)
14761 v.AddArg3(lo, hi, bits)
14762 return true
14763 }
14764 break
14765 }
14766 // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
14767 // result: (MOVQconst [c|d])
14768 for {
14769 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14770 if v_0.Op != OpAMD64MOVQconst {
14771 continue
14772 }
14773 c := auxIntToInt64(v_0.AuxInt)
14774 if v_1.Op != OpAMD64MOVQconst {
14775 continue
14776 }
14777 d := auxIntToInt64(v_1.AuxInt)
14778 v.reset(OpAMD64MOVQconst)
14779 v.AuxInt = int64ToAuxInt(c | d)
14780 return true
14781 }
14782 break
14783 }
14784 // match: (ORQ x x)
14785 // result: x
14786 for {
14787 x := v_0
14788 if x != v_1 {
14789 break
14790 }
14791 v.copyOf(x)
14792 return true
14793 }
14794 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
14795 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
14796 // result: (ORQload x [off] {sym} ptr mem)
14797 for {
14798 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14799 x := v_0
14800 l := v_1
14801 if l.Op != OpAMD64MOVQload {
14802 continue
14803 }
14804 off := auxIntToInt32(l.AuxInt)
14805 sym := auxToSym(l.Aux)
14806 mem := l.Args[1]
14807 ptr := l.Args[0]
14808 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14809 continue
14810 }
14811 v.reset(OpAMD64ORQload)
14812 v.AuxInt = int32ToAuxInt(off)
14813 v.Aux = symToAux(sym)
14814 v.AddArg3(x, ptr, mem)
14815 return true
14816 }
14817 break
14818 }
14819 return false
14820 }
14821 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
14822 v_0 := v.Args[0]
14823 // match: (ORQconst [c] (ORQconst [d] x))
14824 // result: (ORQconst [c | d] x)
14825 for {
14826 c := auxIntToInt32(v.AuxInt)
14827 if v_0.Op != OpAMD64ORQconst {
14828 break
14829 }
14830 d := auxIntToInt32(v_0.AuxInt)
14831 x := v_0.Args[0]
14832 v.reset(OpAMD64ORQconst)
14833 v.AuxInt = int32ToAuxInt(c | d)
14834 v.AddArg(x)
14835 return true
14836 }
14837 // match: (ORQconst [0] x)
14838 // result: x
14839 for {
14840 if auxIntToInt32(v.AuxInt) != 0 {
14841 break
14842 }
14843 x := v_0
14844 v.copyOf(x)
14845 return true
14846 }
14847 // match: (ORQconst [-1] _)
14848 // result: (MOVQconst [-1])
14849 for {
14850 if auxIntToInt32(v.AuxInt) != -1 {
14851 break
14852 }
14853 v.reset(OpAMD64MOVQconst)
14854 v.AuxInt = int64ToAuxInt(-1)
14855 return true
14856 }
14857 // match: (ORQconst [c] (MOVQconst [d]))
14858 // result: (MOVQconst [int64(c)|d])
14859 for {
14860 c := auxIntToInt32(v.AuxInt)
14861 if v_0.Op != OpAMD64MOVQconst {
14862 break
14863 }
14864 d := auxIntToInt64(v_0.AuxInt)
14865 v.reset(OpAMD64MOVQconst)
14866 v.AuxInt = int64ToAuxInt(int64(c) | d)
14867 return true
14868 }
14869 return false
14870 }
14871 func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
14872 v_1 := v.Args[1]
14873 v_0 := v.Args[0]
14874 // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
14875 // cond: ValAndOff(valoff1).canAdd32(off2)
14876 // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
14877 for {
14878 valoff1 := auxIntToValAndOff(v.AuxInt)
14879 sym := auxToSym(v.Aux)
14880 if v_0.Op != OpAMD64ADDQconst {
14881 break
14882 }
14883 off2 := auxIntToInt32(v_0.AuxInt)
14884 base := v_0.Args[0]
14885 mem := v_1
14886 if !(ValAndOff(valoff1).canAdd32(off2)) {
14887 break
14888 }
14889 v.reset(OpAMD64ORQconstmodify)
14890 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14891 v.Aux = symToAux(sym)
14892 v.AddArg2(base, mem)
14893 return true
14894 }
14895 // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
14896 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
14897 // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
14898 for {
14899 valoff1 := auxIntToValAndOff(v.AuxInt)
14900 sym1 := auxToSym(v.Aux)
14901 if v_0.Op != OpAMD64LEAQ {
14902 break
14903 }
14904 off2 := auxIntToInt32(v_0.AuxInt)
14905 sym2 := auxToSym(v_0.Aux)
14906 base := v_0.Args[0]
14907 mem := v_1
14908 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14909 break
14910 }
14911 v.reset(OpAMD64ORQconstmodify)
14912 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14913 v.Aux = symToAux(mergeSym(sym1, sym2))
14914 v.AddArg2(base, mem)
14915 return true
14916 }
14917 return false
14918 }
14919 func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
14920 v_2 := v.Args[2]
14921 v_1 := v.Args[1]
14922 v_0 := v.Args[0]
14923 b := v.Block
14924 typ := &b.Func.Config.Types
14925 // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
14926 // cond: is32Bit(int64(off1)+int64(off2))
14927 // result: (ORQload [off1+off2] {sym} val base mem)
14928 for {
14929 off1 := auxIntToInt32(v.AuxInt)
14930 sym := auxToSym(v.Aux)
14931 val := v_0
14932 if v_1.Op != OpAMD64ADDQconst {
14933 break
14934 }
14935 off2 := auxIntToInt32(v_1.AuxInt)
14936 base := v_1.Args[0]
14937 mem := v_2
14938 if !(is32Bit(int64(off1) + int64(off2))) {
14939 break
14940 }
14941 v.reset(OpAMD64ORQload)
14942 v.AuxInt = int32ToAuxInt(off1 + off2)
14943 v.Aux = symToAux(sym)
14944 v.AddArg3(val, base, mem)
14945 return true
14946 }
14947 // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
14948 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14949 // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
14950 for {
14951 off1 := auxIntToInt32(v.AuxInt)
14952 sym1 := auxToSym(v.Aux)
14953 val := v_0
14954 if v_1.Op != OpAMD64LEAQ {
14955 break
14956 }
14957 off2 := auxIntToInt32(v_1.AuxInt)
14958 sym2 := auxToSym(v_1.Aux)
14959 base := v_1.Args[0]
14960 mem := v_2
14961 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14962 break
14963 }
14964 v.reset(OpAMD64ORQload)
14965 v.AuxInt = int32ToAuxInt(off1 + off2)
14966 v.Aux = symToAux(mergeSym(sym1, sym2))
14967 v.AddArg3(val, base, mem)
14968 return true
14969 }
14970 // match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
14971 // result: (ORQ x (MOVQf2i y))
14972 for {
14973 off := auxIntToInt32(v.AuxInt)
14974 sym := auxToSym(v.Aux)
14975 x := v_0
14976 ptr := v_1
14977 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14978 break
14979 }
14980 y := v_2.Args[1]
14981 if ptr != v_2.Args[0] {
14982 break
14983 }
14984 v.reset(OpAMD64ORQ)
14985 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
14986 v0.AddArg(y)
14987 v.AddArg2(x, v0)
14988 return true
14989 }
14990 return false
14991 }
14992 func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
14993 v_2 := v.Args[2]
14994 v_1 := v.Args[1]
14995 v_0 := v.Args[0]
14996 // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
14997 // cond: is32Bit(int64(off1)+int64(off2))
14998 // result: (ORQmodify [off1+off2] {sym} base val mem)
14999 for {
15000 off1 := auxIntToInt32(v.AuxInt)
15001 sym := auxToSym(v.Aux)
15002 if v_0.Op != OpAMD64ADDQconst {
15003 break
15004 }
15005 off2 := auxIntToInt32(v_0.AuxInt)
15006 base := v_0.Args[0]
15007 val := v_1
15008 mem := v_2
15009 if !(is32Bit(int64(off1) + int64(off2))) {
15010 break
15011 }
15012 v.reset(OpAMD64ORQmodify)
15013 v.AuxInt = int32ToAuxInt(off1 + off2)
15014 v.Aux = symToAux(sym)
15015 v.AddArg3(base, val, mem)
15016 return true
15017 }
15018 // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
15019 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
15020 // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
15021 for {
15022 off1 := auxIntToInt32(v.AuxInt)
15023 sym1 := auxToSym(v.Aux)
15024 if v_0.Op != OpAMD64LEAQ {
15025 break
15026 }
15027 off2 := auxIntToInt32(v_0.AuxInt)
15028 sym2 := auxToSym(v_0.Aux)
15029 base := v_0.Args[0]
15030 val := v_1
15031 mem := v_2
15032 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15033 break
15034 }
15035 v.reset(OpAMD64ORQmodify)
15036 v.AuxInt = int32ToAuxInt(off1 + off2)
15037 v.Aux = symToAux(mergeSym(sym1, sym2))
15038 v.AddArg3(base, val, mem)
15039 return true
15040 }
15041 return false
15042 }
15043 func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
15044 v_1 := v.Args[1]
15045 v_0 := v.Args[0]
15046 // match: (ROLB x (NEGQ y))
15047 // result: (RORB x y)
15048 for {
15049 x := v_0
15050 if v_1.Op != OpAMD64NEGQ {
15051 break
15052 }
15053 y := v_1.Args[0]
15054 v.reset(OpAMD64RORB)
15055 v.AddArg2(x, y)
15056 return true
15057 }
15058 // match: (ROLB x (NEGL y))
15059 // result: (RORB x y)
15060 for {
15061 x := v_0
15062 if v_1.Op != OpAMD64NEGL {
15063 break
15064 }
15065 y := v_1.Args[0]
15066 v.reset(OpAMD64RORB)
15067 v.AddArg2(x, y)
15068 return true
15069 }
15070 // match: (ROLB x (MOVQconst [c]))
15071 // result: (ROLBconst [int8(c&7)] x)
15072 for {
15073 x := v_0
15074 if v_1.Op != OpAMD64MOVQconst {
15075 break
15076 }
15077 c := auxIntToInt64(v_1.AuxInt)
15078 v.reset(OpAMD64ROLBconst)
15079 v.AuxInt = int8ToAuxInt(int8(c & 7))
15080 v.AddArg(x)
15081 return true
15082 }
15083 // match: (ROLB x (MOVLconst [c]))
15084 // result: (ROLBconst [int8(c&7)] x)
15085 for {
15086 x := v_0
15087 if v_1.Op != OpAMD64MOVLconst {
15088 break
15089 }
15090 c := auxIntToInt32(v_1.AuxInt)
15091 v.reset(OpAMD64ROLBconst)
15092 v.AuxInt = int8ToAuxInt(int8(c & 7))
15093 v.AddArg(x)
15094 return true
15095 }
15096 return false
15097 }
15098 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
15099 v_0 := v.Args[0]
15100 // match: (ROLBconst [0] x)
15101 // result: x
15102 for {
15103 if auxIntToInt8(v.AuxInt) != 0 {
15104 break
15105 }
15106 x := v_0
15107 v.copyOf(x)
15108 return true
15109 }
15110 return false
15111 }
15112 func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
15113 v_1 := v.Args[1]
15114 v_0 := v.Args[0]
15115 // match: (ROLL x (NEGQ y))
15116 // result: (RORL x y)
15117 for {
15118 x := v_0
15119 if v_1.Op != OpAMD64NEGQ {
15120 break
15121 }
15122 y := v_1.Args[0]
15123 v.reset(OpAMD64RORL)
15124 v.AddArg2(x, y)
15125 return true
15126 }
15127 // match: (ROLL x (NEGL y))
15128 // result: (RORL x y)
15129 for {
15130 x := v_0
15131 if v_1.Op != OpAMD64NEGL {
15132 break
15133 }
15134 y := v_1.Args[0]
15135 v.reset(OpAMD64RORL)
15136 v.AddArg2(x, y)
15137 return true
15138 }
15139 // match: (ROLL x (MOVQconst [c]))
15140 // result: (ROLLconst [int8(c&31)] x)
15141 for {
15142 x := v_0
15143 if v_1.Op != OpAMD64MOVQconst {
15144 break
15145 }
15146 c := auxIntToInt64(v_1.AuxInt)
15147 v.reset(OpAMD64ROLLconst)
15148 v.AuxInt = int8ToAuxInt(int8(c & 31))
15149 v.AddArg(x)
15150 return true
15151 }
15152 // match: (ROLL x (MOVLconst [c]))
15153 // result: (ROLLconst [int8(c&31)] x)
15154 for {
15155 x := v_0
15156 if v_1.Op != OpAMD64MOVLconst {
15157 break
15158 }
15159 c := auxIntToInt32(v_1.AuxInt)
15160 v.reset(OpAMD64ROLLconst)
15161 v.AuxInt = int8ToAuxInt(int8(c & 31))
15162 v.AddArg(x)
15163 return true
15164 }
15165 return false
15166 }
15167 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
15168 v_0 := v.Args[0]
15169 // match: (ROLLconst [0] x)
15170 // result: x
15171 for {
15172 if auxIntToInt8(v.AuxInt) != 0 {
15173 break
15174 }
15175 x := v_0
15176 v.copyOf(x)
15177 return true
15178 }
15179 return false
15180 }
15181 func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
15182 v_1 := v.Args[1]
15183 v_0 := v.Args[0]
15184 // match: (ROLQ x (NEGQ y))
15185 // result: (RORQ x y)
15186 for {
15187 x := v_0
15188 if v_1.Op != OpAMD64NEGQ {
15189 break
15190 }
15191 y := v_1.Args[0]
15192 v.reset(OpAMD64RORQ)
15193 v.AddArg2(x, y)
15194 return true
15195 }
15196 // match: (ROLQ x (NEGL y))
15197 // result: (RORQ x y)
15198 for {
15199 x := v_0
15200 if v_1.Op != OpAMD64NEGL {
15201 break
15202 }
15203 y := v_1.Args[0]
15204 v.reset(OpAMD64RORQ)
15205 v.AddArg2(x, y)
15206 return true
15207 }
15208 // match: (ROLQ x (MOVQconst [c]))
15209 // result: (ROLQconst [int8(c&63)] x)
15210 for {
15211 x := v_0
15212 if v_1.Op != OpAMD64MOVQconst {
15213 break
15214 }
15215 c := auxIntToInt64(v_1.AuxInt)
15216 v.reset(OpAMD64ROLQconst)
15217 v.AuxInt = int8ToAuxInt(int8(c & 63))
15218 v.AddArg(x)
15219 return true
15220 }
15221 // match: (ROLQ x (MOVLconst [c]))
15222 // result: (ROLQconst [int8(c&63)] x)
15223 for {
15224 x := v_0
15225 if v_1.Op != OpAMD64MOVLconst {
15226 break
15227 }
15228 c := auxIntToInt32(v_1.AuxInt)
15229 v.reset(OpAMD64ROLQconst)
15230 v.AuxInt = int8ToAuxInt(int8(c & 63))
15231 v.AddArg(x)
15232 return true
15233 }
15234 return false
15235 }
15236 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
15237 v_0 := v.Args[0]
15238 // match: (ROLQconst [0] x)
15239 // result: x
15240 for {
15241 if auxIntToInt8(v.AuxInt) != 0 {
15242 break
15243 }
15244 x := v_0
15245 v.copyOf(x)
15246 return true
15247 }
15248 return false
15249 }
15250 func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
15251 v_1 := v.Args[1]
15252 v_0 := v.Args[0]
15253 // match: (ROLW x (NEGQ y))
15254 // result: (RORW x y)
15255 for {
15256 x := v_0
15257 if v_1.Op != OpAMD64NEGQ {
15258 break
15259 }
15260 y := v_1.Args[0]
15261 v.reset(OpAMD64RORW)
15262 v.AddArg2(x, y)
15263 return true
15264 }
15265 // match: (ROLW x (NEGL y))
15266 // result: (RORW x y)
15267 for {
15268 x := v_0
15269 if v_1.Op != OpAMD64NEGL {
15270 break
15271 }
15272 y := v_1.Args[0]
15273 v.reset(OpAMD64RORW)
15274 v.AddArg2(x, y)
15275 return true
15276 }
15277 // match: (ROLW x (MOVQconst [c]))
15278 // result: (ROLWconst [int8(c&15)] x)
15279 for {
15280 x := v_0
15281 if v_1.Op != OpAMD64MOVQconst {
15282 break
15283 }
15284 c := auxIntToInt64(v_1.AuxInt)
15285 v.reset(OpAMD64ROLWconst)
15286 v.AuxInt = int8ToAuxInt(int8(c & 15))
15287 v.AddArg(x)
15288 return true
15289 }
15290 // match: (ROLW x (MOVLconst [c]))
15291 // result: (ROLWconst [int8(c&15)] x)
15292 for {
15293 x := v_0
15294 if v_1.Op != OpAMD64MOVLconst {
15295 break
15296 }
15297 c := auxIntToInt32(v_1.AuxInt)
15298 v.reset(OpAMD64ROLWconst)
15299 v.AuxInt = int8ToAuxInt(int8(c & 15))
15300 v.AddArg(x)
15301 return true
15302 }
15303 return false
15304 }
15305 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
15306 v_0 := v.Args[0]
15307 // match: (ROLWconst [0] x)
15308 // result: x
15309 for {
15310 if auxIntToInt8(v.AuxInt) != 0 {
15311 break
15312 }
15313 x := v_0
15314 v.copyOf(x)
15315 return true
15316 }
15317 return false
15318 }
15319 func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
15320 v_1 := v.Args[1]
15321 v_0 := v.Args[0]
15322 // match: (RORB x (NEGQ y))
15323 // result: (ROLB x y)
15324 for {
15325 x := v_0
15326 if v_1.Op != OpAMD64NEGQ {
15327 break
15328 }
15329 y := v_1.Args[0]
15330 v.reset(OpAMD64ROLB)
15331 v.AddArg2(x, y)
15332 return true
15333 }
15334 // match: (RORB x (NEGL y))
15335 // result: (ROLB x y)
15336 for {
15337 x := v_0
15338 if v_1.Op != OpAMD64NEGL {
15339 break
15340 }
15341 y := v_1.Args[0]
15342 v.reset(OpAMD64ROLB)
15343 v.AddArg2(x, y)
15344 return true
15345 }
15346 // match: (RORB x (MOVQconst [c]))
15347 // result: (ROLBconst [int8((-c)&7)] x)
15348 for {
15349 x := v_0
15350 if v_1.Op != OpAMD64MOVQconst {
15351 break
15352 }
15353 c := auxIntToInt64(v_1.AuxInt)
15354 v.reset(OpAMD64ROLBconst)
15355 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15356 v.AddArg(x)
15357 return true
15358 }
15359 // match: (RORB x (MOVLconst [c]))
15360 // result: (ROLBconst [int8((-c)&7)] x)
15361 for {
15362 x := v_0
15363 if v_1.Op != OpAMD64MOVLconst {
15364 break
15365 }
15366 c := auxIntToInt32(v_1.AuxInt)
15367 v.reset(OpAMD64ROLBconst)
15368 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15369 v.AddArg(x)
15370 return true
15371 }
15372 return false
15373 }
15374 func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
15375 v_1 := v.Args[1]
15376 v_0 := v.Args[0]
15377 // match: (RORL x (NEGQ y))
15378 // result: (ROLL x y)
15379 for {
15380 x := v_0
15381 if v_1.Op != OpAMD64NEGQ {
15382 break
15383 }
15384 y := v_1.Args[0]
15385 v.reset(OpAMD64ROLL)
15386 v.AddArg2(x, y)
15387 return true
15388 }
15389 // match: (RORL x (NEGL y))
15390 // result: (ROLL x y)
15391 for {
15392 x := v_0
15393 if v_1.Op != OpAMD64NEGL {
15394 break
15395 }
15396 y := v_1.Args[0]
15397 v.reset(OpAMD64ROLL)
15398 v.AddArg2(x, y)
15399 return true
15400 }
15401 // match: (RORL x (MOVQconst [c]))
15402 // result: (ROLLconst [int8((-c)&31)] x)
15403 for {
15404 x := v_0
15405 if v_1.Op != OpAMD64MOVQconst {
15406 break
15407 }
15408 c := auxIntToInt64(v_1.AuxInt)
15409 v.reset(OpAMD64ROLLconst)
15410 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15411 v.AddArg(x)
15412 return true
15413 }
15414 // match: (RORL x (MOVLconst [c]))
15415 // result: (ROLLconst [int8((-c)&31)] x)
15416 for {
15417 x := v_0
15418 if v_1.Op != OpAMD64MOVLconst {
15419 break
15420 }
15421 c := auxIntToInt32(v_1.AuxInt)
15422 v.reset(OpAMD64ROLLconst)
15423 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15424 v.AddArg(x)
15425 return true
15426 }
15427 return false
15428 }
15429 func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
15430 v_1 := v.Args[1]
15431 v_0 := v.Args[0]
15432 // match: (RORQ x (NEGQ y))
15433 // result: (ROLQ x y)
15434 for {
15435 x := v_0
15436 if v_1.Op != OpAMD64NEGQ {
15437 break
15438 }
15439 y := v_1.Args[0]
15440 v.reset(OpAMD64ROLQ)
15441 v.AddArg2(x, y)
15442 return true
15443 }
15444 // match: (RORQ x (NEGL y))
15445 // result: (ROLQ x y)
15446 for {
15447 x := v_0
15448 if v_1.Op != OpAMD64NEGL {
15449 break
15450 }
15451 y := v_1.Args[0]
15452 v.reset(OpAMD64ROLQ)
15453 v.AddArg2(x, y)
15454 return true
15455 }
15456 // match: (RORQ x (MOVQconst [c]))
15457 // result: (ROLQconst [int8((-c)&63)] x)
15458 for {
15459 x := v_0
15460 if v_1.Op != OpAMD64MOVQconst {
15461 break
15462 }
15463 c := auxIntToInt64(v_1.AuxInt)
15464 v.reset(OpAMD64ROLQconst)
15465 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15466 v.AddArg(x)
15467 return true
15468 }
15469 // match: (RORQ x (MOVLconst [c]))
15470 // result: (ROLQconst [int8((-c)&63)] x)
15471 for {
15472 x := v_0
15473 if v_1.Op != OpAMD64MOVLconst {
15474 break
15475 }
15476 c := auxIntToInt32(v_1.AuxInt)
15477 v.reset(OpAMD64ROLQconst)
15478 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15479 v.AddArg(x)
15480 return true
15481 }
15482 return false
15483 }
15484 func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
15485 v_1 := v.Args[1]
15486 v_0 := v.Args[0]
15487 // match: (RORW x (NEGQ y))
15488 // result: (ROLW x y)
15489 for {
15490 x := v_0
15491 if v_1.Op != OpAMD64NEGQ {
15492 break
15493 }
15494 y := v_1.Args[0]
15495 v.reset(OpAMD64ROLW)
15496 v.AddArg2(x, y)
15497 return true
15498 }
15499 // match: (RORW x (NEGL y))
15500 // result: (ROLW x y)
15501 for {
15502 x := v_0
15503 if v_1.Op != OpAMD64NEGL {
15504 break
15505 }
15506 y := v_1.Args[0]
15507 v.reset(OpAMD64ROLW)
15508 v.AddArg2(x, y)
15509 return true
15510 }
15511 // match: (RORW x (MOVQconst [c]))
15512 // result: (ROLWconst [int8((-c)&15)] x)
15513 for {
15514 x := v_0
15515 if v_1.Op != OpAMD64MOVQconst {
15516 break
15517 }
15518 c := auxIntToInt64(v_1.AuxInt)
15519 v.reset(OpAMD64ROLWconst)
15520 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15521 v.AddArg(x)
15522 return true
15523 }
15524 // match: (RORW x (MOVLconst [c]))
15525 // result: (ROLWconst [int8((-c)&15)] x)
15526 for {
15527 x := v_0
15528 if v_1.Op != OpAMD64MOVLconst {
15529 break
15530 }
15531 c := auxIntToInt32(v_1.AuxInt)
15532 v.reset(OpAMD64ROLWconst)
15533 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15534 v.AddArg(x)
15535 return true
15536 }
15537 return false
15538 }
15539 func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
15540 v_1 := v.Args[1]
15541 v_0 := v.Args[0]
15542 // match: (SARB x (MOVQconst [c]))
15543 // result: (SARBconst [int8(min(int64(c)&31,7))] x)
15544 for {
15545 x := v_0
15546 if v_1.Op != OpAMD64MOVQconst {
15547 break
15548 }
15549 c := auxIntToInt64(v_1.AuxInt)
15550 v.reset(OpAMD64SARBconst)
15551 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15552 v.AddArg(x)
15553 return true
15554 }
15555 // match: (SARB x (MOVLconst [c]))
15556 // result: (SARBconst [int8(min(int64(c)&31,7))] x)
15557 for {
15558 x := v_0
15559 if v_1.Op != OpAMD64MOVLconst {
15560 break
15561 }
15562 c := auxIntToInt32(v_1.AuxInt)
15563 v.reset(OpAMD64SARBconst)
15564 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15565 v.AddArg(x)
15566 return true
15567 }
15568 return false
15569 }
15570 func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
15571 v_0 := v.Args[0]
15572 // match: (SARBconst [0] x)
15573 // result: x
15574 for {
15575 if auxIntToInt8(v.AuxInt) != 0 {
15576 break
15577 }
15578 x := v_0
15579 v.copyOf(x)
15580 return true
15581 }
15582 // match: (SARBconst [c] (MOVQconst [d]))
15583 // result: (MOVQconst [int64(int8(d))>>uint64(c)])
15584 for {
15585 c := auxIntToInt8(v.AuxInt)
15586 if v_0.Op != OpAMD64MOVQconst {
15587 break
15588 }
15589 d := auxIntToInt64(v_0.AuxInt)
15590 v.reset(OpAMD64MOVQconst)
15591 v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
15592 return true
15593 }
15594 return false
15595 }
15596 func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
15597 v_1 := v.Args[1]
15598 v_0 := v.Args[0]
15599 b := v.Block
15600 // match: (SARL x (MOVQconst [c]))
15601 // result: (SARLconst [int8(c&31)] x)
15602 for {
15603 x := v_0
15604 if v_1.Op != OpAMD64MOVQconst {
15605 break
15606 }
15607 c := auxIntToInt64(v_1.AuxInt)
15608 v.reset(OpAMD64SARLconst)
15609 v.AuxInt = int8ToAuxInt(int8(c & 31))
15610 v.AddArg(x)
15611 return true
15612 }
15613 // match: (SARL x (MOVLconst [c]))
15614 // result: (SARLconst [int8(c&31)] x)
15615 for {
15616 x := v_0
15617 if v_1.Op != OpAMD64MOVLconst {
15618 break
15619 }
15620 c := auxIntToInt32(v_1.AuxInt)
15621 v.reset(OpAMD64SARLconst)
15622 v.AuxInt = int8ToAuxInt(int8(c & 31))
15623 v.AddArg(x)
15624 return true
15625 }
15626 // match: (SARL x (ADDQconst [c] y))
15627 // cond: c & 31 == 0
15628 // result: (SARL x y)
15629 for {
15630 x := v_0
15631 if v_1.Op != OpAMD64ADDQconst {
15632 break
15633 }
15634 c := auxIntToInt32(v_1.AuxInt)
15635 y := v_1.Args[0]
15636 if !(c&31 == 0) {
15637 break
15638 }
15639 v.reset(OpAMD64SARL)
15640 v.AddArg2(x, y)
15641 return true
15642 }
15643 // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
15644 // cond: c & 31 == 0
15645 // result: (SARL x (NEGQ <t> y))
15646 for {
15647 x := v_0
15648 if v_1.Op != OpAMD64NEGQ {
15649 break
15650 }
15651 t := v_1.Type
15652 v_1_0 := v_1.Args[0]
15653 if v_1_0.Op != OpAMD64ADDQconst {
15654 break
15655 }
15656 c := auxIntToInt32(v_1_0.AuxInt)
15657 y := v_1_0.Args[0]
15658 if !(c&31 == 0) {
15659 break
15660 }
15661 v.reset(OpAMD64SARL)
15662 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15663 v0.AddArg(y)
15664 v.AddArg2(x, v0)
15665 return true
15666 }
15667 // match: (SARL x (ANDQconst [c] y))
15668 // cond: c & 31 == 31
15669 // result: (SARL x y)
15670 for {
15671 x := v_0
15672 if v_1.Op != OpAMD64ANDQconst {
15673 break
15674 }
15675 c := auxIntToInt32(v_1.AuxInt)
15676 y := v_1.Args[0]
15677 if !(c&31 == 31) {
15678 break
15679 }
15680 v.reset(OpAMD64SARL)
15681 v.AddArg2(x, y)
15682 return true
15683 }
15684 // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
15685 // cond: c & 31 == 31
15686 // result: (SARL x (NEGQ <t> y))
15687 for {
15688 x := v_0
15689 if v_1.Op != OpAMD64NEGQ {
15690 break
15691 }
15692 t := v_1.Type
15693 v_1_0 := v_1.Args[0]
15694 if v_1_0.Op != OpAMD64ANDQconst {
15695 break
15696 }
15697 c := auxIntToInt32(v_1_0.AuxInt)
15698 y := v_1_0.Args[0]
15699 if !(c&31 == 31) {
15700 break
15701 }
15702 v.reset(OpAMD64SARL)
15703 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15704 v0.AddArg(y)
15705 v.AddArg2(x, v0)
15706 return true
15707 }
15708 // match: (SARL x (ADDLconst [c] y))
15709 // cond: c & 31 == 0
15710 // result: (SARL x y)
15711 for {
15712 x := v_0
15713 if v_1.Op != OpAMD64ADDLconst {
15714 break
15715 }
15716 c := auxIntToInt32(v_1.AuxInt)
15717 y := v_1.Args[0]
15718 if !(c&31 == 0) {
15719 break
15720 }
15721 v.reset(OpAMD64SARL)
15722 v.AddArg2(x, y)
15723 return true
15724 }
15725 // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
15726 // cond: c & 31 == 0
15727 // result: (SARL x (NEGL <t> y))
15728 for {
15729 x := v_0
15730 if v_1.Op != OpAMD64NEGL {
15731 break
15732 }
15733 t := v_1.Type
15734 v_1_0 := v_1.Args[0]
15735 if v_1_0.Op != OpAMD64ADDLconst {
15736 break
15737 }
15738 c := auxIntToInt32(v_1_0.AuxInt)
15739 y := v_1_0.Args[0]
15740 if !(c&31 == 0) {
15741 break
15742 }
15743 v.reset(OpAMD64SARL)
15744 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15745 v0.AddArg(y)
15746 v.AddArg2(x, v0)
15747 return true
15748 }
15749 // match: (SARL x (ANDLconst [c] y))
15750 // cond: c & 31 == 31
15751 // result: (SARL x y)
15752 for {
15753 x := v_0
15754 if v_1.Op != OpAMD64ANDLconst {
15755 break
15756 }
15757 c := auxIntToInt32(v_1.AuxInt)
15758 y := v_1.Args[0]
15759 if !(c&31 == 31) {
15760 break
15761 }
15762 v.reset(OpAMD64SARL)
15763 v.AddArg2(x, y)
15764 return true
15765 }
15766 // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
15767 // cond: c & 31 == 31
15768 // result: (SARL x (NEGL <t> y))
15769 for {
15770 x := v_0
15771 if v_1.Op != OpAMD64NEGL {
15772 break
15773 }
15774 t := v_1.Type
15775 v_1_0 := v_1.Args[0]
15776 if v_1_0.Op != OpAMD64ANDLconst {
15777 break
15778 }
15779 c := auxIntToInt32(v_1_0.AuxInt)
15780 y := v_1_0.Args[0]
15781 if !(c&31 == 31) {
15782 break
15783 }
15784 v.reset(OpAMD64SARL)
15785 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15786 v0.AddArg(y)
15787 v.AddArg2(x, v0)
15788 return true
15789 }
15790 // match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
15791 // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
15792 // result: (SARXLload [off] {sym} ptr x mem)
15793 for {
15794 l := v_0
15795 if l.Op != OpAMD64MOVLload {
15796 break
15797 }
15798 off := auxIntToInt32(l.AuxInt)
15799 sym := auxToSym(l.Aux)
15800 mem := l.Args[1]
15801 ptr := l.Args[0]
15802 x := v_1
15803 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
15804 break
15805 }
15806 v.reset(OpAMD64SARXLload)
15807 v.AuxInt = int32ToAuxInt(off)
15808 v.Aux = symToAux(sym)
15809 v.AddArg3(ptr, x, mem)
15810 return true
15811 }
15812 return false
15813 }
15814 func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
15815 v_0 := v.Args[0]
15816 // match: (SARLconst [0] x)
15817 // result: x
15818 for {
15819 if auxIntToInt8(v.AuxInt) != 0 {
15820 break
15821 }
15822 x := v_0
15823 v.copyOf(x)
15824 return true
15825 }
15826 // match: (SARLconst [c] (MOVQconst [d]))
15827 // result: (MOVQconst [int64(int32(d))>>uint64(c)])
15828 for {
15829 c := auxIntToInt8(v.AuxInt)
15830 if v_0.Op != OpAMD64MOVQconst {
15831 break
15832 }
15833 d := auxIntToInt64(v_0.AuxInt)
15834 v.reset(OpAMD64MOVQconst)
15835 v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
15836 return true
15837 }
15838 return false
15839 }
15840 func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
15841 v_1 := v.Args[1]
15842 v_0 := v.Args[0]
15843 b := v.Block
15844 // match: (SARQ x (MOVQconst [c]))
15845 // result: (SARQconst [int8(c&63)] x)
15846 for {
15847 x := v_0
15848 if v_1.Op != OpAMD64MOVQconst {
15849 break
15850 }
15851 c := auxIntToInt64(v_1.AuxInt)
15852 v.reset(OpAMD64SARQconst)
15853 v.AuxInt = int8ToAuxInt(int8(c & 63))
15854 v.AddArg(x)
15855 return true
15856 }
15857 // match: (SARQ x (MOVLconst [c]))
15858 // result: (SARQconst [int8(c&63)] x)
15859 for {
15860 x := v_0
15861 if v_1.Op != OpAMD64MOVLconst {
15862 break
15863 }
15864 c := auxIntToInt32(v_1.AuxInt)
15865 v.reset(OpAMD64SARQconst)
15866 v.AuxInt = int8ToAuxInt(int8(c & 63))
15867 v.AddArg(x)
15868 return true
15869 }
15870 // match: (SARQ x (ADDQconst [c] y))
15871 // cond: c & 63 == 0
15872 // result: (SARQ x y)
15873 for {
15874 x := v_0
15875 if v_1.Op != OpAMD64ADDQconst {
15876 break
15877 }
15878 c := auxIntToInt32(v_1.AuxInt)
15879 y := v_1.Args[0]
15880 if !(c&63 == 0) {
15881 break
15882 }
15883 v.reset(OpAMD64SARQ)
15884 v.AddArg2(x, y)
15885 return true
15886 }
15887 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
15888 // cond: c & 63 == 0
15889 // result: (SARQ x (NEGQ <t> y))
15890 for {
15891 x := v_0
15892 if v_1.Op != OpAMD64NEGQ {
15893 break
15894 }
15895 t := v_1.Type
15896 v_1_0 := v_1.Args[0]
15897 if v_1_0.Op != OpAMD64ADDQconst {
15898 break
15899 }
15900 c := auxIntToInt32(v_1_0.AuxInt)
15901 y := v_1_0.Args[0]
15902 if !(c&63 == 0) {
15903 break
15904 }
15905 v.reset(OpAMD64SARQ)
15906 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15907 v0.AddArg(y)
15908 v.AddArg2(x, v0)
15909 return true
15910 }
15911 // match: (SARQ x (ANDQconst [c] y))
15912 // cond: c & 63 == 63
15913 // result: (SARQ x y)
15914 for {
15915 x := v_0
15916 if v_1.Op != OpAMD64ANDQconst {
15917 break
15918 }
15919 c := auxIntToInt32(v_1.AuxInt)
15920 y := v_1.Args[0]
15921 if !(c&63 == 63) {
15922 break
15923 }
15924 v.reset(OpAMD64SARQ)
15925 v.AddArg2(x, y)
15926 return true
15927 }
15928 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
15929 // cond: c & 63 == 63
15930 // result: (SARQ x (NEGQ <t> y))
15931 for {
15932 x := v_0
15933 if v_1.Op != OpAMD64NEGQ {
15934 break
15935 }
15936 t := v_1.Type
15937 v_1_0 := v_1.Args[0]
15938 if v_1_0.Op != OpAMD64ANDQconst {
15939 break
15940 }
15941 c := auxIntToInt32(v_1_0.AuxInt)
15942 y := v_1_0.Args[0]
15943 if !(c&63 == 63) {
15944 break
15945 }
15946 v.reset(OpAMD64SARQ)
15947 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15948 v0.AddArg(y)
15949 v.AddArg2(x, v0)
15950 return true
15951 }
15952 // match: (SARQ x (ADDLconst [c] y))
15953 // cond: c & 63 == 0
15954 // result: (SARQ x y)
15955 for {
15956 x := v_0
15957 if v_1.Op != OpAMD64ADDLconst {
15958 break
15959 }
15960 c := auxIntToInt32(v_1.AuxInt)
15961 y := v_1.Args[0]
15962 if !(c&63 == 0) {
15963 break
15964 }
15965 v.reset(OpAMD64SARQ)
15966 v.AddArg2(x, y)
15967 return true
15968 }
15969 // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
15970 // cond: c & 63 == 0
15971 // result: (SARQ x (NEGL <t> y))
15972 for {
15973 x := v_0
15974 if v_1.Op != OpAMD64NEGL {
15975 break
15976 }
15977 t := v_1.Type
15978 v_1_0 := v_1.Args[0]
15979 if v_1_0.Op != OpAMD64ADDLconst {
15980 break
15981 }
15982 c := auxIntToInt32(v_1_0.AuxInt)
15983 y := v_1_0.Args[0]
15984 if !(c&63 == 0) {
15985 break
15986 }
15987 v.reset(OpAMD64SARQ)
15988 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15989 v0.AddArg(y)
15990 v.AddArg2(x, v0)
15991 return true
15992 }
15993 // match: (SARQ x (ANDLconst [c] y))
15994 // cond: c & 63 == 63
15995 // result: (SARQ x y)
15996 for {
15997 x := v_0
15998 if v_1.Op != OpAMD64ANDLconst {
15999 break
16000 }
16001 c := auxIntToInt32(v_1.AuxInt)
16002 y := v_1.Args[0]
16003 if !(c&63 == 63) {
16004 break
16005 }
16006 v.reset(OpAMD64SARQ)
16007 v.AddArg2(x, y)
16008 return true
16009 }
16010 // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
16011 // cond: c & 63 == 63
16012 // result: (SARQ x (NEGL <t> y))
16013 for {
16014 x := v_0
16015 if v_1.Op != OpAMD64NEGL {
16016 break
16017 }
16018 t := v_1.Type
16019 v_1_0 := v_1.Args[0]
16020 if v_1_0.Op != OpAMD64ANDLconst {
16021 break
16022 }
16023 c := auxIntToInt32(v_1_0.AuxInt)
16024 y := v_1_0.Args[0]
16025 if !(c&63 == 63) {
16026 break
16027 }
16028 v.reset(OpAMD64SARQ)
16029 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
16030 v0.AddArg(y)
16031 v.AddArg2(x, v0)
16032 return true
16033 }
16034 // match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
16035 // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
16036 // result: (SARXQload [off] {sym} ptr x mem)
16037 for {
16038 l := v_0
16039 if l.Op != OpAMD64MOVQload {
16040 break
16041 }
16042 off := auxIntToInt32(l.AuxInt)
16043 sym := auxToSym(l.Aux)
16044 mem := l.Args[1]
16045 ptr := l.Args[0]
16046 x := v_1
16047 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
16048 break
16049 }
16050 v.reset(OpAMD64SARXQload)
16051 v.AuxInt = int32ToAuxInt(off)
16052 v.Aux = symToAux(sym)
16053 v.AddArg3(ptr, x, mem)
16054 return true
16055 }
16056 return false
16057 }
16058 func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
16059 v_0 := v.Args[0]
16060 // match: (SARQconst [0] x)
16061 // result: x
16062 for {
16063 if auxIntToInt8(v.AuxInt) != 0 {
16064 break
16065 }
16066 x := v_0
16067 v.copyOf(x)
16068 return true
16069 }
16070 // match: (SARQconst [c] (MOVQconst [d]))
16071 // result: (MOVQconst [d>>uint64(c)])
16072 for {
16073 c := auxIntToInt8(v.AuxInt)
16074 if v_0.Op != OpAMD64MOVQconst {
16075 break
16076 }
16077 d := auxIntToInt64(v_0.AuxInt)
16078 v.reset(OpAMD64MOVQconst)
16079 v.AuxInt = int64ToAuxInt(d >> uint64(c))
16080 return true
16081 }
16082 return false
16083 }
16084 func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
16085 v_1 := v.Args[1]
16086 v_0 := v.Args[0]
16087 // match: (SARW x (MOVQconst [c]))
16088 // result: (SARWconst [int8(min(int64(c)&31,15))] x)
16089 for {
16090 x := v_0
16091 if v_1.Op != OpAMD64MOVQconst {
16092 break
16093 }
16094 c := auxIntToInt64(v_1.AuxInt)
16095 v.reset(OpAMD64SARWconst)
16096 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
16097 v.AddArg(x)
16098 return true
16099 }
16100 // match: (SARW x (MOVLconst [c]))
16101 // result: (SARWconst [int8(min(int64(c)&31,15))] x)
16102 for {
16103 x := v_0
16104 if v_1.Op != OpAMD64MOVLconst {
16105 break
16106 }
16107 c := auxIntToInt32(v_1.AuxInt)
16108 v.reset(OpAMD64SARWconst)
16109 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
16110 v.AddArg(x)
16111 return true
16112 }
16113 return false
16114 }
16115 func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
16116 v_0 := v.Args[0]
16117 // match: (SARWconst [0] x)
16118 // result: x
16119 for {
16120 if auxIntToInt8(v.AuxInt) != 0 {
16121 break
16122 }
16123 x := v_0
16124 v.copyOf(x)
16125 return true
16126 }
16127 // match: (SARWconst [c] (MOVQconst [d]))
16128 // result: (MOVQconst [int64(int16(d))>>uint64(c)])
16129 for {
16130 c := auxIntToInt8(v.AuxInt)
16131 if v_0.Op != OpAMD64MOVQconst {
16132 break
16133 }
16134 d := auxIntToInt64(v_0.AuxInt)
16135 v.reset(OpAMD64MOVQconst)
16136 v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
16137 return true
16138 }
16139 return false
16140 }
16141 func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
16142 v_2 := v.Args[2]
16143 v_1 := v.Args[1]
16144 v_0 := v.Args[0]
16145 b := v.Block
16146 typ := &b.Func.Config.Types
16147 // match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
16148 // result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
16149 for {
16150 off := auxIntToInt32(v.AuxInt)
16151 sym := auxToSym(v.Aux)
16152 ptr := v_0
16153 if v_1.Op != OpAMD64MOVLconst {
16154 break
16155 }
16156 c := auxIntToInt32(v_1.AuxInt)
16157 mem := v_2
16158 v.reset(OpAMD64SARLconst)
16159 v.AuxInt = int8ToAuxInt(int8(c & 31))
16160 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
16161 v0.AuxInt = int32ToAuxInt(off)
16162 v0.Aux = symToAux(sym)
16163 v0.AddArg2(ptr, mem)
16164 v.AddArg(v0)
16165 return true
16166 }
16167 return false
16168 }
16169 func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
16170 v_2 := v.Args[2]
16171 v_1 := v.Args[1]
16172 v_0 := v.Args[0]
16173 b := v.Block
16174 typ := &b.Func.Config.Types
16175 // match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
16176 // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
16177 for {
16178 off := auxIntToInt32(v.AuxInt)
16179 sym := auxToSym(v.Aux)
16180 ptr := v_0
16181 if v_1.Op != OpAMD64MOVQconst {
16182 break
16183 }
16184 c := auxIntToInt64(v_1.AuxInt)
16185 mem := v_2
16186 v.reset(OpAMD64SARQconst)
16187 v.AuxInt = int8ToAuxInt(int8(c & 63))
16188 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16189 v0.AuxInt = int32ToAuxInt(off)
16190 v0.Aux = symToAux(sym)
16191 v0.AddArg2(ptr, mem)
16192 v.AddArg(v0)
16193 return true
16194 }
16195 // match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
16196 // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
16197 for {
16198 off := auxIntToInt32(v.AuxInt)
16199 sym := auxToSym(v.Aux)
16200 ptr := v_0
16201 if v_1.Op != OpAMD64MOVLconst {
16202 break
16203 }
16204 c := auxIntToInt32(v_1.AuxInt)
16205 mem := v_2
16206 v.reset(OpAMD64SARQconst)
16207 v.AuxInt = int8ToAuxInt(int8(c & 63))
16208 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16209 v0.AuxInt = int32ToAuxInt(off)
16210 v0.Aux = symToAux(sym)
16211 v0.AddArg2(ptr, mem)
16212 v.AddArg(v0)
16213 return true
16214 }
16215 return false
16216 }
16217 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
16218 v_0 := v.Args[0]
16219 // match: (SBBLcarrymask (FlagEQ))
16220 // result: (MOVLconst [0])
16221 for {
16222 if v_0.Op != OpAMD64FlagEQ {
16223 break
16224 }
16225 v.reset(OpAMD64MOVLconst)
16226 v.AuxInt = int32ToAuxInt(0)
16227 return true
16228 }
16229 // match: (SBBLcarrymask (FlagLT_ULT))
16230 // result: (MOVLconst [-1])
16231 for {
16232 if v_0.Op != OpAMD64FlagLT_ULT {
16233 break
16234 }
16235 v.reset(OpAMD64MOVLconst)
16236 v.AuxInt = int32ToAuxInt(-1)
16237 return true
16238 }
16239 // match: (SBBLcarrymask (FlagLT_UGT))
16240 // result: (MOVLconst [0])
16241 for {
16242 if v_0.Op != OpAMD64FlagLT_UGT {
16243 break
16244 }
16245 v.reset(OpAMD64MOVLconst)
16246 v.AuxInt = int32ToAuxInt(0)
16247 return true
16248 }
16249 // match: (SBBLcarrymask (FlagGT_ULT))
16250 // result: (MOVLconst [-1])
16251 for {
16252 if v_0.Op != OpAMD64FlagGT_ULT {
16253 break
16254 }
16255 v.reset(OpAMD64MOVLconst)
16256 v.AuxInt = int32ToAuxInt(-1)
16257 return true
16258 }
16259 // match: (SBBLcarrymask (FlagGT_UGT))
16260 // result: (MOVLconst [0])
16261 for {
16262 if v_0.Op != OpAMD64FlagGT_UGT {
16263 break
16264 }
16265 v.reset(OpAMD64MOVLconst)
16266 v.AuxInt = int32ToAuxInt(0)
16267 return true
16268 }
16269 return false
16270 }
16271 func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
16272 v_2 := v.Args[2]
16273 v_1 := v.Args[1]
16274 v_0 := v.Args[0]
16275 // match: (SBBQ x (MOVQconst [c]) borrow)
16276 // cond: is32Bit(c)
16277 // result: (SBBQconst x [int32(c)] borrow)
16278 for {
16279 x := v_0
16280 if v_1.Op != OpAMD64MOVQconst {
16281 break
16282 }
16283 c := auxIntToInt64(v_1.AuxInt)
16284 borrow := v_2
16285 if !(is32Bit(c)) {
16286 break
16287 }
16288 v.reset(OpAMD64SBBQconst)
16289 v.AuxInt = int32ToAuxInt(int32(c))
16290 v.AddArg2(x, borrow)
16291 return true
16292 }
16293 // match: (SBBQ x y (FlagEQ))
16294 // result: (SUBQborrow x y)
16295 for {
16296 x := v_0
16297 y := v_1
16298 if v_2.Op != OpAMD64FlagEQ {
16299 break
16300 }
16301 v.reset(OpAMD64SUBQborrow)
16302 v.AddArg2(x, y)
16303 return true
16304 }
16305 return false
16306 }
16307 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
16308 v_0 := v.Args[0]
16309 // match: (SBBQcarrymask (FlagEQ))
16310 // result: (MOVQconst [0])
16311 for {
16312 if v_0.Op != OpAMD64FlagEQ {
16313 break
16314 }
16315 v.reset(OpAMD64MOVQconst)
16316 v.AuxInt = int64ToAuxInt(0)
16317 return true
16318 }
	// match: (SBBQcarrymask (FlagLT_ULT))
	// result: (MOVQconst [-1])
16321 for {
16322 if v_0.Op != OpAMD64FlagLT_ULT {
16323 break
16324 }
16325 v.reset(OpAMD64MOVQconst)
16326 v.AuxInt = int64ToAuxInt(-1)
16327 return true
16328 }
	// match: (SBBQcarrymask (FlagLT_UGT))
	// result: (MOVQconst [0])
16331 for {
16332 if v_0.Op != OpAMD64FlagLT_UGT {
16333 break
16334 }
16335 v.reset(OpAMD64MOVQconst)
16336 v.AuxInt = int64ToAuxInt(0)
16337 return true
16338 }
	// match: (SBBQcarrymask (FlagGT_ULT))
	// result: (MOVQconst [-1])
16341 for {
16342 if v_0.Op != OpAMD64FlagGT_ULT {
16343 break
16344 }
16345 v.reset(OpAMD64MOVQconst)
16346 v.AuxInt = int64ToAuxInt(-1)
16347 return true
16348 }
	// match: (SBBQcarrymask (FlagGT_UGT))
	// result: (MOVQconst [0])
16351 for {
16352 if v_0.Op != OpAMD64FlagGT_UGT {
16353 break
16354 }
16355 v.reset(OpAMD64MOVQconst)
16356 v.AuxInt = int64ToAuxInt(0)
16357 return true
16358 }
16359 return false
16360 }
16361 func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
16362 v_1 := v.Args[1]
16363 v_0 := v.Args[0]
	// match: (SBBQconst x [c] (FlagEQ))
	// result: (SUBQconstborrow x [c])
16366 for {
16367 c := auxIntToInt32(v.AuxInt)
16368 x := v_0
16369 if v_1.Op != OpAMD64FlagEQ {
16370 break
16371 }
16372 v.reset(OpAMD64SUBQconstborrow)
16373 v.AuxInt = int32ToAuxInt(c)
16374 v.AddArg(x)
16375 return true
16376 }
16377 return false
16378 }
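// The SETcc rewrites below all follow one scheme. InvertFlags stands for the
// flags of the same comparison with its operands swapped, so the condition
// mirrors (SETA <-> SETB, SETAE <-> SETBE, SETL <-> SETG, ...). And once the
// flags are a known Flag* constant, the whole SETcc folds to MOVLconst [0]
// or [1].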
16379 func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
16380 v_0 := v.Args[0]
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
16383 for {
16384 if v_0.Op != OpAMD64InvertFlags {
16385 break
16386 }
16387 x := v_0.Args[0]
16388 v.reset(OpAMD64SETB)
16389 v.AddArg(x)
16390 return true
16391 }
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
16394 for {
16395 if v_0.Op != OpAMD64FlagEQ {
16396 break
16397 }
16398 v.reset(OpAMD64MOVLconst)
16399 v.AuxInt = int32ToAuxInt(0)
16400 return true
16401 }
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
16404 for {
16405 if v_0.Op != OpAMD64FlagLT_ULT {
16406 break
16407 }
16408 v.reset(OpAMD64MOVLconst)
16409 v.AuxInt = int32ToAuxInt(0)
16410 return true
16411 }
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
16414 for {
16415 if v_0.Op != OpAMD64FlagLT_UGT {
16416 break
16417 }
16418 v.reset(OpAMD64MOVLconst)
16419 v.AuxInt = int32ToAuxInt(1)
16420 return true
16421 }
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
16424 for {
16425 if v_0.Op != OpAMD64FlagGT_ULT {
16426 break
16427 }
16428 v.reset(OpAMD64MOVLconst)
16429 v.AuxInt = int32ToAuxInt(0)
16430 return true
16431 }
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
16434 for {
16435 if v_0.Op != OpAMD64FlagGT_UGT {
16436 break
16437 }
16438 v.reset(OpAMD64MOVLconst)
16439 v.AuxInt = int32ToAuxInt(1)
16440 return true
16441 }
16442 return false
16443 }
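// TEST of a register against itself can never set the carry flag, so
// unsigned >= (SETAE) of such flags is a tautology and folds to
// ConstBool [true]; the matching SETB rules further down fold to false.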
16444 func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
16445 v_0 := v.Args[0]
	// match: (SETAE (TESTQ x x))
	// result: (ConstBool [true])
16448 for {
16449 if v_0.Op != OpAMD64TESTQ {
16450 break
16451 }
16452 x := v_0.Args[1]
16453 if x != v_0.Args[0] {
16454 break
16455 }
16456 v.reset(OpConstBool)
16457 v.AuxInt = boolToAuxInt(true)
16458 return true
16459 }
	// match: (SETAE (TESTL x x))
	// result: (ConstBool [true])
16462 for {
16463 if v_0.Op != OpAMD64TESTL {
16464 break
16465 }
16466 x := v_0.Args[1]
16467 if x != v_0.Args[0] {
16468 break
16469 }
16470 v.reset(OpConstBool)
16471 v.AuxInt = boolToAuxInt(true)
16472 return true
16473 }
	// match: (SETAE (TESTW x x))
	// result: (ConstBool [true])
16476 for {
16477 if v_0.Op != OpAMD64TESTW {
16478 break
16479 }
16480 x := v_0.Args[1]
16481 if x != v_0.Args[0] {
16482 break
16483 }
16484 v.reset(OpConstBool)
16485 v.AuxInt = boolToAuxInt(true)
16486 return true
16487 }
	// match: (SETAE (TESTB x x))
	// result: (ConstBool [true])
16490 for {
16491 if v_0.Op != OpAMD64TESTB {
16492 break
16493 }
16494 x := v_0.Args[1]
16495 if x != v_0.Args[0] {
16496 break
16497 }
16498 v.reset(OpConstBool)
16499 v.AuxInt = boolToAuxInt(true)
16500 return true
16501 }
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
16504 for {
16505 if v_0.Op != OpAMD64InvertFlags {
16506 break
16507 }
16508 x := v_0.Args[0]
16509 v.reset(OpAMD64SETBE)
16510 v.AddArg(x)
16511 return true
16512 }
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
16515 for {
16516 if v_0.Op != OpAMD64FlagEQ {
16517 break
16518 }
16519 v.reset(OpAMD64MOVLconst)
16520 v.AuxInt = int32ToAuxInt(1)
16521 return true
16522 }
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
16525 for {
16526 if v_0.Op != OpAMD64FlagLT_ULT {
16527 break
16528 }
16529 v.reset(OpAMD64MOVLconst)
16530 v.AuxInt = int32ToAuxInt(0)
16531 return true
16532 }
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
16535 for {
16536 if v_0.Op != OpAMD64FlagLT_UGT {
16537 break
16538 }
16539 v.reset(OpAMD64MOVLconst)
16540 v.AuxInt = int32ToAuxInt(1)
16541 return true
16542 }
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
16545 for {
16546 if v_0.Op != OpAMD64FlagGT_ULT {
16547 break
16548 }
16549 v.reset(OpAMD64MOVLconst)
16550 v.AuxInt = int32ToAuxInt(0)
16551 return true
16552 }
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
16555 for {
16556 if v_0.Op != OpAMD64FlagGT_UGT {
16557 break
16558 }
16559 v.reset(OpAMD64MOVLconst)
16560 v.AuxInt = int32ToAuxInt(1)
16561 return true
16562 }
16563 return false
16564 }
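// The SETccstore forms fuse the flag materialization with a byte store.
// Besides mirroring the plain SETcc rules, they fold addressing arithmetic:
// an ADDQconst base merges into the offset while the sum still fits in 32
// bits, and a LEAQ base merges both offset and symbol via mergeSym.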
16565 func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
16566 v_2 := v.Args[2]
16567 v_1 := v.Args[1]
16568 v_0 := v.Args[0]
16569 b := v.Block
16570 typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
16573 for {
16574 off := auxIntToInt32(v.AuxInt)
16575 sym := auxToSym(v.Aux)
16576 ptr := v_0
16577 if v_1.Op != OpAMD64InvertFlags {
16578 break
16579 }
16580 x := v_1.Args[0]
16581 mem := v_2
16582 v.reset(OpAMD64SETBEstore)
16583 v.AuxInt = int32ToAuxInt(off)
16584 v.Aux = symToAux(sym)
16585 v.AddArg3(ptr, x, mem)
16586 return true
16587 }
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAEstore [off1+off2] {sym} base val mem)
16591 for {
16592 off1 := auxIntToInt32(v.AuxInt)
16593 sym := auxToSym(v.Aux)
16594 if v_0.Op != OpAMD64ADDQconst {
16595 break
16596 }
16597 off2 := auxIntToInt32(v_0.AuxInt)
16598 base := v_0.Args[0]
16599 val := v_1
16600 mem := v_2
16601 if !(is32Bit(int64(off1) + int64(off2))) {
16602 break
16603 }
16604 v.reset(OpAMD64SETAEstore)
16605 v.AuxInt = int32ToAuxInt(off1 + off2)
16606 v.Aux = symToAux(sym)
16607 v.AddArg3(base, val, mem)
16608 return true
16609 }
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
16613 for {
16614 off1 := auxIntToInt32(v.AuxInt)
16615 sym1 := auxToSym(v.Aux)
16616 if v_0.Op != OpAMD64LEAQ {
16617 break
16618 }
16619 off2 := auxIntToInt32(v_0.AuxInt)
16620 sym2 := auxToSym(v_0.Aux)
16621 base := v_0.Args[0]
16622 val := v_1
16623 mem := v_2
16624 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16625 break
16626 }
16627 v.reset(OpAMD64SETAEstore)
16628 v.AuxInt = int32ToAuxInt(off1 + off2)
16629 v.Aux = symToAux(mergeSym(sym1, sym2))
16630 v.AddArg3(base, val, mem)
16631 return true
16632 }
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16635 for {
16636 off := auxIntToInt32(v.AuxInt)
16637 sym := auxToSym(v.Aux)
16638 ptr := v_0
16639 if v_1.Op != OpAMD64FlagEQ {
16640 break
16641 }
16642 mem := v_2
16643 v.reset(OpAMD64MOVBstore)
16644 v.AuxInt = int32ToAuxInt(off)
16645 v.Aux = symToAux(sym)
16646 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16647 v0.AuxInt = int32ToAuxInt(1)
16648 v.AddArg3(ptr, v0, mem)
16649 return true
16650 }
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16653 for {
16654 off := auxIntToInt32(v.AuxInt)
16655 sym := auxToSym(v.Aux)
16656 ptr := v_0
16657 if v_1.Op != OpAMD64FlagLT_ULT {
16658 break
16659 }
16660 mem := v_2
16661 v.reset(OpAMD64MOVBstore)
16662 v.AuxInt = int32ToAuxInt(off)
16663 v.Aux = symToAux(sym)
16664 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16665 v0.AuxInt = int32ToAuxInt(0)
16666 v.AddArg3(ptr, v0, mem)
16667 return true
16668 }
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16671 for {
16672 off := auxIntToInt32(v.AuxInt)
16673 sym := auxToSym(v.Aux)
16674 ptr := v_0
16675 if v_1.Op != OpAMD64FlagLT_UGT {
16676 break
16677 }
16678 mem := v_2
16679 v.reset(OpAMD64MOVBstore)
16680 v.AuxInt = int32ToAuxInt(off)
16681 v.Aux = symToAux(sym)
16682 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16683 v0.AuxInt = int32ToAuxInt(1)
16684 v.AddArg3(ptr, v0, mem)
16685 return true
16686 }
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16689 for {
16690 off := auxIntToInt32(v.AuxInt)
16691 sym := auxToSym(v.Aux)
16692 ptr := v_0
16693 if v_1.Op != OpAMD64FlagGT_ULT {
16694 break
16695 }
16696 mem := v_2
16697 v.reset(OpAMD64MOVBstore)
16698 v.AuxInt = int32ToAuxInt(off)
16699 v.Aux = symToAux(sym)
16700 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16701 v0.AuxInt = int32ToAuxInt(0)
16702 v.AddArg3(ptr, v0, mem)
16703 return true
16704 }
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16707 for {
16708 off := auxIntToInt32(v.AuxInt)
16709 sym := auxToSym(v.Aux)
16710 ptr := v_0
16711 if v_1.Op != OpAMD64FlagGT_UGT {
16712 break
16713 }
16714 mem := v_2
16715 v.reset(OpAMD64MOVBstore)
16716 v.AuxInt = int32ToAuxInt(off)
16717 v.Aux = symToAux(sym)
16718 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16719 v0.AuxInt = int32ToAuxInt(1)
16720 v.AddArg3(ptr, v0, mem)
16721 return true
16722 }
16723 return false
16724 }
16725 func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
16726 v_2 := v.Args[2]
16727 v_1 := v.Args[1]
16728 v_0 := v.Args[0]
16729 b := v.Block
16730 typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
16733 for {
16734 off := auxIntToInt32(v.AuxInt)
16735 sym := auxToSym(v.Aux)
16736 ptr := v_0
16737 if v_1.Op != OpAMD64InvertFlags {
16738 break
16739 }
16740 x := v_1.Args[0]
16741 mem := v_2
16742 v.reset(OpAMD64SETBstore)
16743 v.AuxInt = int32ToAuxInt(off)
16744 v.Aux = symToAux(sym)
16745 v.AddArg3(ptr, x, mem)
16746 return true
16747 }
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
16751 for {
16752 off1 := auxIntToInt32(v.AuxInt)
16753 sym := auxToSym(v.Aux)
16754 if v_0.Op != OpAMD64ADDQconst {
16755 break
16756 }
16757 off2 := auxIntToInt32(v_0.AuxInt)
16758 base := v_0.Args[0]
16759 val := v_1
16760 mem := v_2
16761 if !(is32Bit(int64(off1) + int64(off2))) {
16762 break
16763 }
16764 v.reset(OpAMD64SETAstore)
16765 v.AuxInt = int32ToAuxInt(off1 + off2)
16766 v.Aux = symToAux(sym)
16767 v.AddArg3(base, val, mem)
16768 return true
16769 }
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
16773 for {
16774 off1 := auxIntToInt32(v.AuxInt)
16775 sym1 := auxToSym(v.Aux)
16776 if v_0.Op != OpAMD64LEAQ {
16777 break
16778 }
16779 off2 := auxIntToInt32(v_0.AuxInt)
16780 sym2 := auxToSym(v_0.Aux)
16781 base := v_0.Args[0]
16782 val := v_1
16783 mem := v_2
16784 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16785 break
16786 }
16787 v.reset(OpAMD64SETAstore)
16788 v.AuxInt = int32ToAuxInt(off1 + off2)
16789 v.Aux = symToAux(mergeSym(sym1, sym2))
16790 v.AddArg3(base, val, mem)
16791 return true
16792 }
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16795 for {
16796 off := auxIntToInt32(v.AuxInt)
16797 sym := auxToSym(v.Aux)
16798 ptr := v_0
16799 if v_1.Op != OpAMD64FlagEQ {
16800 break
16801 }
16802 mem := v_2
16803 v.reset(OpAMD64MOVBstore)
16804 v.AuxInt = int32ToAuxInt(off)
16805 v.Aux = symToAux(sym)
16806 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16807 v0.AuxInt = int32ToAuxInt(0)
16808 v.AddArg3(ptr, v0, mem)
16809 return true
16810 }
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16813 for {
16814 off := auxIntToInt32(v.AuxInt)
16815 sym := auxToSym(v.Aux)
16816 ptr := v_0
16817 if v_1.Op != OpAMD64FlagLT_ULT {
16818 break
16819 }
16820 mem := v_2
16821 v.reset(OpAMD64MOVBstore)
16822 v.AuxInt = int32ToAuxInt(off)
16823 v.Aux = symToAux(sym)
16824 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16825 v0.AuxInt = int32ToAuxInt(0)
16826 v.AddArg3(ptr, v0, mem)
16827 return true
16828 }
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16831 for {
16832 off := auxIntToInt32(v.AuxInt)
16833 sym := auxToSym(v.Aux)
16834 ptr := v_0
16835 if v_1.Op != OpAMD64FlagLT_UGT {
16836 break
16837 }
16838 mem := v_2
16839 v.reset(OpAMD64MOVBstore)
16840 v.AuxInt = int32ToAuxInt(off)
16841 v.Aux = symToAux(sym)
16842 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16843 v0.AuxInt = int32ToAuxInt(1)
16844 v.AddArg3(ptr, v0, mem)
16845 return true
16846 }
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16849 for {
16850 off := auxIntToInt32(v.AuxInt)
16851 sym := auxToSym(v.Aux)
16852 ptr := v_0
16853 if v_1.Op != OpAMD64FlagGT_ULT {
16854 break
16855 }
16856 mem := v_2
16857 v.reset(OpAMD64MOVBstore)
16858 v.AuxInt = int32ToAuxInt(off)
16859 v.Aux = symToAux(sym)
16860 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16861 v0.AuxInt = int32ToAuxInt(0)
16862 v.AddArg3(ptr, v0, mem)
16863 return true
16864 }
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16867 for {
16868 off := auxIntToInt32(v.AuxInt)
16869 sym := auxToSym(v.Aux)
16870 ptr := v_0
16871 if v_1.Op != OpAMD64FlagGT_UGT {
16872 break
16873 }
16874 mem := v_2
16875 v.reset(OpAMD64MOVBstore)
16876 v.AuxInt = int32ToAuxInt(off)
16877 v.Aux = symToAux(sym)
16878 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16879 v0.AuxInt = int32ToAuxInt(1)
16880 v.AddArg3(ptr, v0, mem)
16881 return true
16882 }
16883 return false
16884 }
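// SETB reads only the carry flag, and BT copies the tested bit into CF.
// Testing bit 0 is therefore just x&1, which the BTLconst/BTQconst rules
// below express as ANDLconst/ANDQconst [1] -- one ALU op instead of a bit
// test plus setcc.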
16885 func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
16886 v_0 := v.Args[0]
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
16889 for {
16890 if v_0.Op != OpAMD64TESTQ {
16891 break
16892 }
16893 x := v_0.Args[1]
16894 if x != v_0.Args[0] {
16895 break
16896 }
16897 v.reset(OpConstBool)
16898 v.AuxInt = boolToAuxInt(false)
16899 return true
16900 }
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
16903 for {
16904 if v_0.Op != OpAMD64TESTL {
16905 break
16906 }
16907 x := v_0.Args[1]
16908 if x != v_0.Args[0] {
16909 break
16910 }
16911 v.reset(OpConstBool)
16912 v.AuxInt = boolToAuxInt(false)
16913 return true
16914 }
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
16917 for {
16918 if v_0.Op != OpAMD64TESTW {
16919 break
16920 }
16921 x := v_0.Args[1]
16922 if x != v_0.Args[0] {
16923 break
16924 }
16925 v.reset(OpConstBool)
16926 v.AuxInt = boolToAuxInt(false)
16927 return true
16928 }
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
16931 for {
16932 if v_0.Op != OpAMD64TESTB {
16933 break
16934 }
16935 x := v_0.Args[1]
16936 if x != v_0.Args[0] {
16937 break
16938 }
16939 v.reset(OpConstBool)
16940 v.AuxInt = boolToAuxInt(false)
16941 return true
16942 }
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
16945 for {
16946 if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
16947 break
16948 }
16949 x := v_0.Args[0]
16950 v.reset(OpAMD64ANDLconst)
16951 v.AuxInt = int32ToAuxInt(1)
16952 v.AddArg(x)
16953 return true
16954 }
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
16957 for {
16958 if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
16959 break
16960 }
16961 x := v_0.Args[0]
16962 v.reset(OpAMD64ANDQconst)
16963 v.AuxInt = int32ToAuxInt(1)
16964 v.AddArg(x)
16965 return true
16966 }
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
16969 for {
16970 if v_0.Op != OpAMD64InvertFlags {
16971 break
16972 }
16973 x := v_0.Args[0]
16974 v.reset(OpAMD64SETA)
16975 v.AddArg(x)
16976 return true
16977 }
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
16980 for {
16981 if v_0.Op != OpAMD64FlagEQ {
16982 break
16983 }
16984 v.reset(OpAMD64MOVLconst)
16985 v.AuxInt = int32ToAuxInt(0)
16986 return true
16987 }
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
16990 for {
16991 if v_0.Op != OpAMD64FlagLT_ULT {
16992 break
16993 }
16994 v.reset(OpAMD64MOVLconst)
16995 v.AuxInt = int32ToAuxInt(1)
16996 return true
16997 }
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
17000 for {
17001 if v_0.Op != OpAMD64FlagLT_UGT {
17002 break
17003 }
17004 v.reset(OpAMD64MOVLconst)
17005 v.AuxInt = int32ToAuxInt(0)
17006 return true
17007 }
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
17010 for {
17011 if v_0.Op != OpAMD64FlagGT_ULT {
17012 break
17013 }
17014 v.reset(OpAMD64MOVLconst)
17015 v.AuxInt = int32ToAuxInt(1)
17016 return true
17017 }
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
17020 for {
17021 if v_0.Op != OpAMD64FlagGT_UGT {
17022 break
17023 }
17024 v.reset(OpAMD64MOVLconst)
17025 v.AuxInt = int32ToAuxInt(0)
17026 return true
17027 }
17028 return false
17029 }
17030 func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
17031 v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
17034 for {
17035 if v_0.Op != OpAMD64InvertFlags {
17036 break
17037 }
17038 x := v_0.Args[0]
17039 v.reset(OpAMD64SETAE)
17040 v.AddArg(x)
17041 return true
17042 }
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
17045 for {
17046 if v_0.Op != OpAMD64FlagEQ {
17047 break
17048 }
17049 v.reset(OpAMD64MOVLconst)
17050 v.AuxInt = int32ToAuxInt(1)
17051 return true
17052 }
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
17055 for {
17056 if v_0.Op != OpAMD64FlagLT_ULT {
17057 break
17058 }
17059 v.reset(OpAMD64MOVLconst)
17060 v.AuxInt = int32ToAuxInt(1)
17061 return true
17062 }
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
17065 for {
17066 if v_0.Op != OpAMD64FlagLT_UGT {
17067 break
17068 }
17069 v.reset(OpAMD64MOVLconst)
17070 v.AuxInt = int32ToAuxInt(0)
17071 return true
17072 }
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
17075 for {
17076 if v_0.Op != OpAMD64FlagGT_ULT {
17077 break
17078 }
17079 v.reset(OpAMD64MOVLconst)
17080 v.AuxInt = int32ToAuxInt(1)
17081 return true
17082 }
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
17085 for {
17086 if v_0.Op != OpAMD64FlagGT_UGT {
17087 break
17088 }
17089 v.reset(OpAMD64MOVLconst)
17090 v.AuxInt = int32ToAuxInt(0)
17091 return true
17092 }
17093 return false
17094 }
17095 func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
17096 v_2 := v.Args[2]
17097 v_1 := v.Args[1]
17098 v_0 := v.Args[0]
17099 b := v.Block
17100 typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
17103 for {
17104 off := auxIntToInt32(v.AuxInt)
17105 sym := auxToSym(v.Aux)
17106 ptr := v_0
17107 if v_1.Op != OpAMD64InvertFlags {
17108 break
17109 }
17110 x := v_1.Args[0]
17111 mem := v_2
17112 v.reset(OpAMD64SETAEstore)
17113 v.AuxInt = int32ToAuxInt(off)
17114 v.Aux = symToAux(sym)
17115 v.AddArg3(ptr, x, mem)
17116 return true
17117 }
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
17121 for {
17122 off1 := auxIntToInt32(v.AuxInt)
17123 sym := auxToSym(v.Aux)
17124 if v_0.Op != OpAMD64ADDQconst {
17125 break
17126 }
17127 off2 := auxIntToInt32(v_0.AuxInt)
17128 base := v_0.Args[0]
17129 val := v_1
17130 mem := v_2
17131 if !(is32Bit(int64(off1) + int64(off2))) {
17132 break
17133 }
17134 v.reset(OpAMD64SETBEstore)
17135 v.AuxInt = int32ToAuxInt(off1 + off2)
17136 v.Aux = symToAux(sym)
17137 v.AddArg3(base, val, mem)
17138 return true
17139 }
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
17143 for {
17144 off1 := auxIntToInt32(v.AuxInt)
17145 sym1 := auxToSym(v.Aux)
17146 if v_0.Op != OpAMD64LEAQ {
17147 break
17148 }
17149 off2 := auxIntToInt32(v_0.AuxInt)
17150 sym2 := auxToSym(v_0.Aux)
17151 base := v_0.Args[0]
17152 val := v_1
17153 mem := v_2
17154 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17155 break
17156 }
17157 v.reset(OpAMD64SETBEstore)
17158 v.AuxInt = int32ToAuxInt(off1 + off2)
17159 v.Aux = symToAux(mergeSym(sym1, sym2))
17160 v.AddArg3(base, val, mem)
17161 return true
17162 }
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
17165 for {
17166 off := auxIntToInt32(v.AuxInt)
17167 sym := auxToSym(v.Aux)
17168 ptr := v_0
17169 if v_1.Op != OpAMD64FlagEQ {
17170 break
17171 }
17172 mem := v_2
17173 v.reset(OpAMD64MOVBstore)
17174 v.AuxInt = int32ToAuxInt(off)
17175 v.Aux = symToAux(sym)
17176 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17177 v0.AuxInt = int32ToAuxInt(1)
17178 v.AddArg3(ptr, v0, mem)
17179 return true
17180 }
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
17183 for {
17184 off := auxIntToInt32(v.AuxInt)
17185 sym := auxToSym(v.Aux)
17186 ptr := v_0
17187 if v_1.Op != OpAMD64FlagLT_ULT {
17188 break
17189 }
17190 mem := v_2
17191 v.reset(OpAMD64MOVBstore)
17192 v.AuxInt = int32ToAuxInt(off)
17193 v.Aux = symToAux(sym)
17194 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17195 v0.AuxInt = int32ToAuxInt(1)
17196 v.AddArg3(ptr, v0, mem)
17197 return true
17198 }
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
17201 for {
17202 off := auxIntToInt32(v.AuxInt)
17203 sym := auxToSym(v.Aux)
17204 ptr := v_0
17205 if v_1.Op != OpAMD64FlagLT_UGT {
17206 break
17207 }
17208 mem := v_2
17209 v.reset(OpAMD64MOVBstore)
17210 v.AuxInt = int32ToAuxInt(off)
17211 v.Aux = symToAux(sym)
17212 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17213 v0.AuxInt = int32ToAuxInt(0)
17214 v.AddArg3(ptr, v0, mem)
17215 return true
17216 }
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
17219 for {
17220 off := auxIntToInt32(v.AuxInt)
17221 sym := auxToSym(v.Aux)
17222 ptr := v_0
17223 if v_1.Op != OpAMD64FlagGT_ULT {
17224 break
17225 }
17226 mem := v_2
17227 v.reset(OpAMD64MOVBstore)
17228 v.AuxInt = int32ToAuxInt(off)
17229 v.Aux = symToAux(sym)
17230 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17231 v0.AuxInt = int32ToAuxInt(1)
17232 v.AddArg3(ptr, v0, mem)
17233 return true
17234 }
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
17237 for {
17238 off := auxIntToInt32(v.AuxInt)
17239 sym := auxToSym(v.Aux)
17240 ptr := v_0
17241 if v_1.Op != OpAMD64FlagGT_UGT {
17242 break
17243 }
17244 mem := v_2
17245 v.reset(OpAMD64MOVBstore)
17246 v.AuxInt = int32ToAuxInt(off)
17247 v.Aux = symToAux(sym)
17248 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17249 v0.AuxInt = int32ToAuxInt(0)
17250 v.AddArg3(ptr, v0, mem)
17251 return true
17252 }
17253 return false
17254 }
17255 func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
17256 v_2 := v.Args[2]
17257 v_1 := v.Args[1]
17258 v_0 := v.Args[0]
17259 b := v.Block
17260 typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
17263 for {
17264 off := auxIntToInt32(v.AuxInt)
17265 sym := auxToSym(v.Aux)
17266 ptr := v_0
17267 if v_1.Op != OpAMD64InvertFlags {
17268 break
17269 }
17270 x := v_1.Args[0]
17271 mem := v_2
17272 v.reset(OpAMD64SETAstore)
17273 v.AuxInt = int32ToAuxInt(off)
17274 v.Aux = symToAux(sym)
17275 v.AddArg3(ptr, x, mem)
17276 return true
17277 }
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
17281 for {
17282 off1 := auxIntToInt32(v.AuxInt)
17283 sym := auxToSym(v.Aux)
17284 if v_0.Op != OpAMD64ADDQconst {
17285 break
17286 }
17287 off2 := auxIntToInt32(v_0.AuxInt)
17288 base := v_0.Args[0]
17289 val := v_1
17290 mem := v_2
17291 if !(is32Bit(int64(off1) + int64(off2))) {
17292 break
17293 }
17294 v.reset(OpAMD64SETBstore)
17295 v.AuxInt = int32ToAuxInt(off1 + off2)
17296 v.Aux = symToAux(sym)
17297 v.AddArg3(base, val, mem)
17298 return true
17299 }
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
17303 for {
17304 off1 := auxIntToInt32(v.AuxInt)
17305 sym1 := auxToSym(v.Aux)
17306 if v_0.Op != OpAMD64LEAQ {
17307 break
17308 }
17309 off2 := auxIntToInt32(v_0.AuxInt)
17310 sym2 := auxToSym(v_0.Aux)
17311 base := v_0.Args[0]
17312 val := v_1
17313 mem := v_2
17314 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17315 break
17316 }
17317 v.reset(OpAMD64SETBstore)
17318 v.AuxInt = int32ToAuxInt(off1 + off2)
17319 v.Aux = symToAux(mergeSym(sym1, sym2))
17320 v.AddArg3(base, val, mem)
17321 return true
17322 }
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
17325 for {
17326 off := auxIntToInt32(v.AuxInt)
17327 sym := auxToSym(v.Aux)
17328 ptr := v_0
17329 if v_1.Op != OpAMD64FlagEQ {
17330 break
17331 }
17332 mem := v_2
17333 v.reset(OpAMD64MOVBstore)
17334 v.AuxInt = int32ToAuxInt(off)
17335 v.Aux = symToAux(sym)
17336 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17337 v0.AuxInt = int32ToAuxInt(0)
17338 v.AddArg3(ptr, v0, mem)
17339 return true
17340 }
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
17343 for {
17344 off := auxIntToInt32(v.AuxInt)
17345 sym := auxToSym(v.Aux)
17346 ptr := v_0
17347 if v_1.Op != OpAMD64FlagLT_ULT {
17348 break
17349 }
17350 mem := v_2
17351 v.reset(OpAMD64MOVBstore)
17352 v.AuxInt = int32ToAuxInt(off)
17353 v.Aux = symToAux(sym)
17354 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17355 v0.AuxInt = int32ToAuxInt(1)
17356 v.AddArg3(ptr, v0, mem)
17357 return true
17358 }
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
17361 for {
17362 off := auxIntToInt32(v.AuxInt)
17363 sym := auxToSym(v.Aux)
17364 ptr := v_0
17365 if v_1.Op != OpAMD64FlagLT_UGT {
17366 break
17367 }
17368 mem := v_2
17369 v.reset(OpAMD64MOVBstore)
17370 v.AuxInt = int32ToAuxInt(off)
17371 v.Aux = symToAux(sym)
17372 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17373 v0.AuxInt = int32ToAuxInt(0)
17374 v.AddArg3(ptr, v0, mem)
17375 return true
17376 }
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
17379 for {
17380 off := auxIntToInt32(v.AuxInt)
17381 sym := auxToSym(v.Aux)
17382 ptr := v_0
17383 if v_1.Op != OpAMD64FlagGT_ULT {
17384 break
17385 }
17386 mem := v_2
17387 v.reset(OpAMD64MOVBstore)
17388 v.AuxInt = int32ToAuxInt(off)
17389 v.Aux = symToAux(sym)
17390 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17391 v0.AuxInt = int32ToAuxInt(1)
17392 v.AddArg3(ptr, v0, mem)
17393 return true
17394 }
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
17397 for {
17398 off := auxIntToInt32(v.AuxInt)
17399 sym := auxToSym(v.Aux)
17400 ptr := v_0
17401 if v_1.Op != OpAMD64FlagGT_UGT {
17402 break
17403 }
17404 mem := v_2
17405 v.reset(OpAMD64MOVBstore)
17406 v.AuxInt = int32ToAuxInt(off)
17407 v.Aux = symToAux(sym)
17408 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17409 v0.AuxInt = int32ToAuxInt(0)
17410 v.AddArg3(ptr, v0, mem)
17411 return true
17412 }
17413 return false
17414 }
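// SETEQ of a single-bit TEST is recognized as a bit test: BT puts the
// selected bit in CF, so "bit is clear" becomes SETAE of the BT. A rough
// source shape that reaches the first two rules (hypothetical names):
//
//	if x&(1<<k) == 0 { ... } // TESTQ (SHLQ (MOVQconst [1]) k) x => BTQ k x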
17415 func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
17416 v_0 := v.Args[0]
17417 b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
17420 for {
17421 if v_0.Op != OpAMD64TESTL {
17422 break
17423 }
17424 _ = v_0.Args[1]
17425 v_0_0 := v_0.Args[0]
17426 v_0_1 := v_0.Args[1]
17427 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17428 if v_0_0.Op != OpAMD64SHLL {
17429 continue
17430 }
17431 x := v_0_0.Args[1]
17432 v_0_0_0 := v_0_0.Args[0]
17433 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
17434 continue
17435 }
17436 y := v_0_1
17437 v.reset(OpAMD64SETAE)
17438 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
17439 v0.AddArg2(x, y)
17440 v.AddArg(v0)
17441 return true
17442 }
17443 break
17444 }
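	// The _i0 loop above (and in the matches that follow) tries both argument
	// orders of a commutative op: on the second pass v_0_0 and v_0_1 are
	// swapped, so one written pattern covers (TESTL a b) and (TESTL b a).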
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
17447 for {
17448 if v_0.Op != OpAMD64TESTQ {
17449 break
17450 }
17451 _ = v_0.Args[1]
17452 v_0_0 := v_0.Args[0]
17453 v_0_1 := v_0.Args[1]
17454 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17455 if v_0_0.Op != OpAMD64SHLQ {
17456 continue
17457 }
17458 x := v_0_0.Args[1]
17459 v_0_0_0 := v_0_0.Args[0]
17460 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
17461 continue
17462 }
17463 y := v_0_1
17464 v.reset(OpAMD64SETAE)
17465 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
17466 v0.AddArg2(x, y)
17467 v.AddArg(v0)
17468 return true
17469 }
17470 break
17471 }
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
17475 for {
17476 if v_0.Op != OpAMD64TESTLconst {
17477 break
17478 }
17479 c := auxIntToInt32(v_0.AuxInt)
17480 x := v_0.Args[0]
17481 if !(isUint32PowerOfTwo(int64(c))) {
17482 break
17483 }
17484 v.reset(OpAMD64SETAE)
17485 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17486 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17487 v0.AddArg(x)
17488 v.AddArg(v0)
17489 return true
17490 }
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
17494 for {
17495 if v_0.Op != OpAMD64TESTQconst {
17496 break
17497 }
17498 c := auxIntToInt32(v_0.AuxInt)
17499 x := v_0.Args[0]
17500 if !(isUint64PowerOfTwo(int64(c))) {
17501 break
17502 }
17503 v.reset(OpAMD64SETAE)
17504 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17505 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17506 v0.AddArg(x)
17507 v.AddArg(v0)
17508 return true
17509 }
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
17513 for {
17514 if v_0.Op != OpAMD64TESTQ {
17515 break
17516 }
17517 _ = v_0.Args[1]
17518 v_0_0 := v_0.Args[0]
17519 v_0_1 := v_0.Args[1]
17520 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17521 if v_0_0.Op != OpAMD64MOVQconst {
17522 continue
17523 }
17524 c := auxIntToInt64(v_0_0.AuxInt)
17525 x := v_0_1
17526 if !(isUint64PowerOfTwo(c)) {
17527 continue
17528 }
17529 v.reset(OpAMD64SETAE)
17530 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17531 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
17532 v0.AddArg(x)
17533 v.AddArg(v0)
17534 return true
17535 }
17536 break
17537 }
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
17540 for {
17541 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
17542 break
17543 }
17544 s := v_0.Args[0]
17545 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
17546 break
17547 }
17548 v.reset(OpAMD64SETNE)
17549 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
17550 v0.AuxInt = int32ToAuxInt(0)
17551 v0.AddArg(s)
17552 v.AddArg(v0)
17553 return true
17554 }
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
17557 for {
17558 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
17559 break
17560 }
17561 s := v_0.Args[0]
17562 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
17563 break
17564 }
17565 v.reset(OpAMD64SETNE)
17566 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
17567 v0.AuxInt = int32ToAuxInt(0)
17568 v0.AddArg(s)
17569 v.AddArg(v0)
17570 return true
17571 }
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
17575 for {
17576 if v_0.Op != OpAMD64TESTQ {
17577 break
17578 }
17579 _ = v_0.Args[1]
17580 v_0_0 := v_0.Args[0]
17581 v_0_1 := v_0.Args[1]
17582 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17583 z1 := v_0_0
17584 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
17585 continue
17586 }
17587 z1_0 := z1.Args[0]
17588 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
17589 continue
17590 }
17591 x := z1_0.Args[0]
17592 z2 := v_0_1
17593 if !(z1 == z2) {
17594 continue
17595 }
17596 v.reset(OpAMD64SETAE)
17597 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17598 v0.AuxInt = int8ToAuxInt(63)
17599 v0.AddArg(x)
17600 v.AddArg(v0)
17601 return true
17602 }
17603 break
17604 }
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
17608 for {
17609 if v_0.Op != OpAMD64TESTL {
17610 break
17611 }
17612 _ = v_0.Args[1]
17613 v_0_0 := v_0.Args[0]
17614 v_0_1 := v_0.Args[1]
17615 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17616 z1 := v_0_0
17617 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
17618 continue
17619 }
17620 z1_0 := z1.Args[0]
17621 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
17622 continue
17623 }
17624 x := z1_0.Args[0]
17625 z2 := v_0_1
17626 if !(z1 == z2) {
17627 continue
17628 }
17629 v.reset(OpAMD64SETAE)
17630 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17631 v0.AuxInt = int8ToAuxInt(31)
17632 v0.AddArg(x)
17633 v.AddArg(v0)
17634 return true
17635 }
17636 break
17637 }
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
17641 for {
17642 if v_0.Op != OpAMD64TESTQ {
17643 break
17644 }
17645 _ = v_0.Args[1]
17646 v_0_0 := v_0.Args[0]
17647 v_0_1 := v_0.Args[1]
17648 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17649 z1 := v_0_0
17650 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
17651 continue
17652 }
17653 z1_0 := z1.Args[0]
17654 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
17655 continue
17656 }
17657 x := z1_0.Args[0]
17658 z2 := v_0_1
17659 if !(z1 == z2) {
17660 continue
17661 }
17662 v.reset(OpAMD64SETAE)
17663 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17664 v0.AuxInt = int8ToAuxInt(0)
17665 v0.AddArg(x)
17666 v.AddArg(v0)
17667 return true
17668 }
17669 break
17670 }
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
17674 for {
17675 if v_0.Op != OpAMD64TESTL {
17676 break
17677 }
17678 _ = v_0.Args[1]
17679 v_0_0 := v_0.Args[0]
17680 v_0_1 := v_0.Args[1]
17681 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17682 z1 := v_0_0
17683 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
17684 continue
17685 }
17686 z1_0 := z1.Args[0]
17687 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
17688 continue
17689 }
17690 x := z1_0.Args[0]
17691 z2 := v_0_1
17692 if !(z1 == z2) {
17693 continue
17694 }
17695 v.reset(OpAMD64SETAE)
17696 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17697 v0.AuxInt = int8ToAuxInt(0)
17698 v0.AddArg(x)
17699 v.AddArg(v0)
17700 return true
17701 }
17702 break
17703 }
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
17707 for {
17708 if v_0.Op != OpAMD64TESTQ {
17709 break
17710 }
17711 _ = v_0.Args[1]
17712 v_0_0 := v_0.Args[0]
17713 v_0_1 := v_0.Args[1]
17714 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17715 z1 := v_0_0
17716 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
17717 continue
17718 }
17719 x := z1.Args[0]
17720 z2 := v_0_1
17721 if !(z1 == z2) {
17722 continue
17723 }
17724 v.reset(OpAMD64SETAE)
17725 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17726 v0.AuxInt = int8ToAuxInt(63)
17727 v0.AddArg(x)
17728 v.AddArg(v0)
17729 return true
17730 }
17731 break
17732 }
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
17736 for {
17737 if v_0.Op != OpAMD64TESTL {
17738 break
17739 }
17740 _ = v_0.Args[1]
17741 v_0_0 := v_0.Args[0]
17742 v_0_1 := v_0.Args[1]
17743 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17744 z1 := v_0_0
17745 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
17746 continue
17747 }
17748 x := z1.Args[0]
17749 z2 := v_0_1
17750 if !(z1 == z2) {
17751 continue
17752 }
17753 v.reset(OpAMD64SETAE)
17754 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17755 v0.AuxInt = int8ToAuxInt(31)
17756 v0.AddArg(x)
17757 v.AddArg(v0)
17758 return true
17759 }
17760 break
17761 }
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
17764 for {
17765 if v_0.Op != OpAMD64InvertFlags {
17766 break
17767 }
17768 x := v_0.Args[0]
17769 v.reset(OpAMD64SETEQ)
17770 v.AddArg(x)
17771 return true
17772 }
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
17775 for {
17776 if v_0.Op != OpAMD64FlagEQ {
17777 break
17778 }
17779 v.reset(OpAMD64MOVLconst)
17780 v.AuxInt = int32ToAuxInt(1)
17781 return true
17782 }
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
17785 for {
17786 if v_0.Op != OpAMD64FlagLT_ULT {
17787 break
17788 }
17789 v.reset(OpAMD64MOVLconst)
17790 v.AuxInt = int32ToAuxInt(0)
17791 return true
17792 }
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
17795 for {
17796 if v_0.Op != OpAMD64FlagLT_UGT {
17797 break
17798 }
17799 v.reset(OpAMD64MOVLconst)
17800 v.AuxInt = int32ToAuxInt(0)
17801 return true
17802 }
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
17805 for {
17806 if v_0.Op != OpAMD64FlagGT_ULT {
17807 break
17808 }
17809 v.reset(OpAMD64MOVLconst)
17810 v.AuxInt = int32ToAuxInt(0)
17811 return true
17812 }
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
17815 for {
17816 if v_0.Op != OpAMD64FlagGT_UGT {
17817 break
17818 }
17819 v.reset(OpAMD64MOVLconst)
17820 v.AuxInt = int32ToAuxInt(0)
17821 return true
17822 }
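	// BLSRQ/BLSRL compute x&(x-1) and already set ZF on the result, exposed
	// as a (value, flags) pair through Select0/Select1. When a TEST of the
	// value against itself feeds SETEQ, the two rules below reuse the
	// instruction's own flags instead of issuing a second TEST.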
	// match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
17825 for {
17826 if v_0.Op != OpAMD64TESTQ {
17827 break
17828 }
17829 _ = v_0.Args[1]
17830 v_0_0 := v_0.Args[0]
17831 v_0_1 := v_0.Args[1]
17832 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17833 s := v_0_0
17834 if s.Op != OpSelect0 {
17835 continue
17836 }
17837 blsr := s.Args[0]
17838 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
17839 continue
17840 }
17841 v.reset(OpAMD64SETEQ)
17842 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
17843 v0.AddArg(blsr)
17844 v.AddArg(v0)
17845 return true
17846 }
17847 break
17848 }
	// match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
17851 for {
17852 if v_0.Op != OpAMD64TESTL {
17853 break
17854 }
17855 _ = v_0.Args[1]
17856 v_0_0 := v_0.Args[0]
17857 v_0_1 := v_0.Args[1]
17858 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17859 s := v_0_0
17860 if s.Op != OpSelect0 {
17861 continue
17862 }
17863 blsr := s.Args[0]
17864 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
17865 continue
17866 }
17867 v.reset(OpAMD64SETEQ)
17868 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
17869 v0.AddArg(blsr)
17870 v.AddArg(v0)
17871 return true
17872 }
17873 break
17874 }
17875 return false
17876 }
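// SETEQstore repeats the SETEQ recognitions with the byte store fused in:
// each bit-test match rewrites to SETAEstore (or SETNEstore for the parity
// flips) of the same BT/CMP flags, and the usual ADDQconst/LEAQ address
// folds and constant-flag folds apply at the end.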
17877 func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
17878 v_2 := v.Args[2]
17879 v_1 := v.Args[1]
17880 v_0 := v.Args[0]
17881 b := v.Block
17882 typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
17885 for {
17886 off := auxIntToInt32(v.AuxInt)
17887 sym := auxToSym(v.Aux)
17888 ptr := v_0
17889 if v_1.Op != OpAMD64TESTL {
17890 break
17891 }
17892 _ = v_1.Args[1]
17893 v_1_0 := v_1.Args[0]
17894 v_1_1 := v_1.Args[1]
17895 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17896 if v_1_0.Op != OpAMD64SHLL {
17897 continue
17898 }
17899 x := v_1_0.Args[1]
17900 v_1_0_0 := v_1_0.Args[0]
17901 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
17902 continue
17903 }
17904 y := v_1_1
17905 mem := v_2
17906 v.reset(OpAMD64SETAEstore)
17907 v.AuxInt = int32ToAuxInt(off)
17908 v.Aux = symToAux(sym)
17909 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
17910 v0.AddArg2(x, y)
17911 v.AddArg3(ptr, v0, mem)
17912 return true
17913 }
17914 break
17915 }
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
17918 for {
17919 off := auxIntToInt32(v.AuxInt)
17920 sym := auxToSym(v.Aux)
17921 ptr := v_0
17922 if v_1.Op != OpAMD64TESTQ {
17923 break
17924 }
17925 _ = v_1.Args[1]
17926 v_1_0 := v_1.Args[0]
17927 v_1_1 := v_1.Args[1]
17928 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17929 if v_1_0.Op != OpAMD64SHLQ {
17930 continue
17931 }
17932 x := v_1_0.Args[1]
17933 v_1_0_0 := v_1_0.Args[0]
17934 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
17935 continue
17936 }
17937 y := v_1_1
17938 mem := v_2
17939 v.reset(OpAMD64SETAEstore)
17940 v.AuxInt = int32ToAuxInt(off)
17941 v.Aux = symToAux(sym)
17942 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
17943 v0.AddArg2(x, y)
17944 v.AddArg3(ptr, v0, mem)
17945 return true
17946 }
17947 break
17948 }
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
17952 for {
17953 off := auxIntToInt32(v.AuxInt)
17954 sym := auxToSym(v.Aux)
17955 ptr := v_0
17956 if v_1.Op != OpAMD64TESTLconst {
17957 break
17958 }
17959 c := auxIntToInt32(v_1.AuxInt)
17960 x := v_1.Args[0]
17961 mem := v_2
17962 if !(isUint32PowerOfTwo(int64(c))) {
17963 break
17964 }
17965 v.reset(OpAMD64SETAEstore)
17966 v.AuxInt = int32ToAuxInt(off)
17967 v.Aux = symToAux(sym)
17968 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17969 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17970 v0.AddArg(x)
17971 v.AddArg3(ptr, v0, mem)
17972 return true
17973 }
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
17977 for {
17978 off := auxIntToInt32(v.AuxInt)
17979 sym := auxToSym(v.Aux)
17980 ptr := v_0
17981 if v_1.Op != OpAMD64TESTQconst {
17982 break
17983 }
17984 c := auxIntToInt32(v_1.AuxInt)
17985 x := v_1.Args[0]
17986 mem := v_2
17987 if !(isUint64PowerOfTwo(int64(c))) {
17988 break
17989 }
17990 v.reset(OpAMD64SETAEstore)
17991 v.AuxInt = int32ToAuxInt(off)
17992 v.Aux = symToAux(sym)
17993 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17994 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17995 v0.AddArg(x)
17996 v.AddArg3(ptr, v0, mem)
17997 return true
17998 }
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
18002 for {
18003 off := auxIntToInt32(v.AuxInt)
18004 sym := auxToSym(v.Aux)
18005 ptr := v_0
18006 if v_1.Op != OpAMD64TESTQ {
18007 break
18008 }
18009 _ = v_1.Args[1]
18010 v_1_0 := v_1.Args[0]
18011 v_1_1 := v_1.Args[1]
18012 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18013 if v_1_0.Op != OpAMD64MOVQconst {
18014 continue
18015 }
18016 c := auxIntToInt64(v_1_0.AuxInt)
18017 x := v_1_1
18018 mem := v_2
18019 if !(isUint64PowerOfTwo(c)) {
18020 continue
18021 }
18022 v.reset(OpAMD64SETAEstore)
18023 v.AuxInt = int32ToAuxInt(off)
18024 v.Aux = symToAux(sym)
18025 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18026 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
18027 v0.AddArg(x)
18028 v.AddArg3(ptr, v0, mem)
18029 return true
18030 }
18031 break
18032 }
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
18035 for {
18036 off := auxIntToInt32(v.AuxInt)
18037 sym := auxToSym(v.Aux)
18038 ptr := v_0
18039 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
18040 break
18041 }
18042 s := v_1.Args[0]
18043 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
18044 break
18045 }
18046 mem := v_2
18047 v.reset(OpAMD64SETNEstore)
18048 v.AuxInt = int32ToAuxInt(off)
18049 v.Aux = symToAux(sym)
18050 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
18051 v0.AuxInt = int32ToAuxInt(0)
18052 v0.AddArg(s)
18053 v.AddArg3(ptr, v0, mem)
18054 return true
18055 }
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
18058 for {
18059 off := auxIntToInt32(v.AuxInt)
18060 sym := auxToSym(v.Aux)
18061 ptr := v_0
18062 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
18063 break
18064 }
18065 s := v_1.Args[0]
18066 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
18067 break
18068 }
18069 mem := v_2
18070 v.reset(OpAMD64SETNEstore)
18071 v.AuxInt = int32ToAuxInt(off)
18072 v.Aux = symToAux(sym)
18073 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
18074 v0.AuxInt = int32ToAuxInt(0)
18075 v0.AddArg(s)
18076 v.AddArg3(ptr, v0, mem)
18077 return true
18078 }
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
18082 for {
18083 off := auxIntToInt32(v.AuxInt)
18084 sym := auxToSym(v.Aux)
18085 ptr := v_0
18086 if v_1.Op != OpAMD64TESTQ {
18087 break
18088 }
18089 _ = v_1.Args[1]
18090 v_1_0 := v_1.Args[0]
18091 v_1_1 := v_1.Args[1]
18092 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18093 z1 := v_1_0
18094 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
18095 continue
18096 }
18097 z1_0 := z1.Args[0]
18098 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
18099 continue
18100 }
18101 x := z1_0.Args[0]
18102 z2 := v_1_1
18103 mem := v_2
18104 if !(z1 == z2) {
18105 continue
18106 }
18107 v.reset(OpAMD64SETAEstore)
18108 v.AuxInt = int32ToAuxInt(off)
18109 v.Aux = symToAux(sym)
18110 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18111 v0.AuxInt = int8ToAuxInt(63)
18112 v0.AddArg(x)
18113 v.AddArg3(ptr, v0, mem)
18114 return true
18115 }
18116 break
18117 }
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
18121 for {
18122 off := auxIntToInt32(v.AuxInt)
18123 sym := auxToSym(v.Aux)
18124 ptr := v_0
18125 if v_1.Op != OpAMD64TESTL {
18126 break
18127 }
18128 _ = v_1.Args[1]
18129 v_1_0 := v_1.Args[0]
18130 v_1_1 := v_1.Args[1]
18131 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18132 z1 := v_1_0
18133 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
18134 continue
18135 }
18136 z1_0 := z1.Args[0]
18137 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
18138 continue
18139 }
18140 x := z1_0.Args[0]
18141 z2 := v_1_1
18142 mem := v_2
18143 if !(z1 == z2) {
18144 continue
18145 }
18146 v.reset(OpAMD64SETAEstore)
18147 v.AuxInt = int32ToAuxInt(off)
18148 v.Aux = symToAux(sym)
18149 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18150 v0.AuxInt = int8ToAuxInt(31)
18151 v0.AddArg(x)
18152 v.AddArg3(ptr, v0, mem)
18153 return true
18154 }
18155 break
18156 }
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
18160 for {
18161 off := auxIntToInt32(v.AuxInt)
18162 sym := auxToSym(v.Aux)
18163 ptr := v_0
18164 if v_1.Op != OpAMD64TESTQ {
18165 break
18166 }
18167 _ = v_1.Args[1]
18168 v_1_0 := v_1.Args[0]
18169 v_1_1 := v_1.Args[1]
18170 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18171 z1 := v_1_0
18172 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
18173 continue
18174 }
18175 z1_0 := z1.Args[0]
18176 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
18177 continue
18178 }
18179 x := z1_0.Args[0]
18180 z2 := v_1_1
18181 mem := v_2
18182 if !(z1 == z2) {
18183 continue
18184 }
18185 v.reset(OpAMD64SETAEstore)
18186 v.AuxInt = int32ToAuxInt(off)
18187 v.Aux = symToAux(sym)
18188 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18189 v0.AuxInt = int8ToAuxInt(0)
18190 v0.AddArg(x)
18191 v.AddArg3(ptr, v0, mem)
18192 return true
18193 }
18194 break
18195 }
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
18199 for {
18200 off := auxIntToInt32(v.AuxInt)
18201 sym := auxToSym(v.Aux)
18202 ptr := v_0
18203 if v_1.Op != OpAMD64TESTL {
18204 break
18205 }
18206 _ = v_1.Args[1]
18207 v_1_0 := v_1.Args[0]
18208 v_1_1 := v_1.Args[1]
18209 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18210 z1 := v_1_0
18211 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
18212 continue
18213 }
18214 z1_0 := z1.Args[0]
18215 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
18216 continue
18217 }
18218 x := z1_0.Args[0]
18219 z2 := v_1_1
18220 mem := v_2
18221 if !(z1 == z2) {
18222 continue
18223 }
18224 v.reset(OpAMD64SETAEstore)
18225 v.AuxInt = int32ToAuxInt(off)
18226 v.Aux = symToAux(sym)
18227 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18228 v0.AuxInt = int8ToAuxInt(0)
18229 v0.AddArg(x)
18230 v.AddArg3(ptr, v0, mem)
18231 return true
18232 }
18233 break
18234 }
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
18238 for {
18239 off := auxIntToInt32(v.AuxInt)
18240 sym := auxToSym(v.Aux)
18241 ptr := v_0
18242 if v_1.Op != OpAMD64TESTQ {
18243 break
18244 }
18245 _ = v_1.Args[1]
18246 v_1_0 := v_1.Args[0]
18247 v_1_1 := v_1.Args[1]
18248 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18249 z1 := v_1_0
18250 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
18251 continue
18252 }
18253 x := z1.Args[0]
18254 z2 := v_1_1
18255 mem := v_2
18256 if !(z1 == z2) {
18257 continue
18258 }
18259 v.reset(OpAMD64SETAEstore)
18260 v.AuxInt = int32ToAuxInt(off)
18261 v.Aux = symToAux(sym)
18262 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18263 v0.AuxInt = int8ToAuxInt(63)
18264 v0.AddArg(x)
18265 v.AddArg3(ptr, v0, mem)
18266 return true
18267 }
18268 break
18269 }
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
18273 for {
18274 off := auxIntToInt32(v.AuxInt)
18275 sym := auxToSym(v.Aux)
18276 ptr := v_0
18277 if v_1.Op != OpAMD64TESTL {
18278 break
18279 }
18280 _ = v_1.Args[1]
18281 v_1_0 := v_1.Args[0]
18282 v_1_1 := v_1.Args[1]
18283 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18284 z1 := v_1_0
18285 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
18286 continue
18287 }
18288 x := z1.Args[0]
18289 z2 := v_1_1
18290 mem := v_2
18291 if !(z1 == z2) {
18292 continue
18293 }
18294 v.reset(OpAMD64SETAEstore)
18295 v.AuxInt = int32ToAuxInt(off)
18296 v.Aux = symToAux(sym)
18297 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18298 v0.AuxInt = int8ToAuxInt(31)
18299 v0.AddArg(x)
18300 v.AddArg3(ptr, v0, mem)
18301 return true
18302 }
18303 break
18304 }
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
18307 for {
18308 off := auxIntToInt32(v.AuxInt)
18309 sym := auxToSym(v.Aux)
18310 ptr := v_0
18311 if v_1.Op != OpAMD64InvertFlags {
18312 break
18313 }
18314 x := v_1.Args[0]
18315 mem := v_2
18316 v.reset(OpAMD64SETEQstore)
18317 v.AuxInt = int32ToAuxInt(off)
18318 v.Aux = symToAux(sym)
18319 v.AddArg3(ptr, x, mem)
18320 return true
18321 }
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETEQstore [off1+off2] {sym} base val mem)
18325 for {
18326 off1 := auxIntToInt32(v.AuxInt)
18327 sym := auxToSym(v.Aux)
18328 if v_0.Op != OpAMD64ADDQconst {
18329 break
18330 }
18331 off2 := auxIntToInt32(v_0.AuxInt)
18332 base := v_0.Args[0]
18333 val := v_1
18334 mem := v_2
18335 if !(is32Bit(int64(off1) + int64(off2))) {
18336 break
18337 }
18338 v.reset(OpAMD64SETEQstore)
18339 v.AuxInt = int32ToAuxInt(off1 + off2)
18340 v.Aux = symToAux(sym)
18341 v.AddArg3(base, val, mem)
18342 return true
18343 }
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
18347 for {
18348 off1 := auxIntToInt32(v.AuxInt)
18349 sym1 := auxToSym(v.Aux)
18350 if v_0.Op != OpAMD64LEAQ {
18351 break
18352 }
18353 off2 := auxIntToInt32(v_0.AuxInt)
18354 sym2 := auxToSym(v_0.Aux)
18355 base := v_0.Args[0]
18356 val := v_1
18357 mem := v_2
18358 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18359 break
18360 }
18361 v.reset(OpAMD64SETEQstore)
18362 v.AuxInt = int32ToAuxInt(off1 + off2)
18363 v.Aux = symToAux(mergeSym(sym1, sym2))
18364 v.AddArg3(base, val, mem)
18365 return true
18366 }
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
18369 for {
18370 off := auxIntToInt32(v.AuxInt)
18371 sym := auxToSym(v.Aux)
18372 ptr := v_0
18373 if v_1.Op != OpAMD64FlagEQ {
18374 break
18375 }
18376 mem := v_2
18377 v.reset(OpAMD64MOVBstore)
18378 v.AuxInt = int32ToAuxInt(off)
18379 v.Aux = symToAux(sym)
18380 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18381 v0.AuxInt = int32ToAuxInt(1)
18382 v.AddArg3(ptr, v0, mem)
18383 return true
18384 }
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18387 for {
18388 off := auxIntToInt32(v.AuxInt)
18389 sym := auxToSym(v.Aux)
18390 ptr := v_0
18391 if v_1.Op != OpAMD64FlagLT_ULT {
18392 break
18393 }
18394 mem := v_2
18395 v.reset(OpAMD64MOVBstore)
18396 v.AuxInt = int32ToAuxInt(off)
18397 v.Aux = symToAux(sym)
18398 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18399 v0.AuxInt = int32ToAuxInt(0)
18400 v.AddArg3(ptr, v0, mem)
18401 return true
18402 }
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18405 for {
18406 off := auxIntToInt32(v.AuxInt)
18407 sym := auxToSym(v.Aux)
18408 ptr := v_0
18409 if v_1.Op != OpAMD64FlagLT_UGT {
18410 break
18411 }
18412 mem := v_2
18413 v.reset(OpAMD64MOVBstore)
18414 v.AuxInt = int32ToAuxInt(off)
18415 v.Aux = symToAux(sym)
18416 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18417 v0.AuxInt = int32ToAuxInt(0)
18418 v.AddArg3(ptr, v0, mem)
18419 return true
18420 }
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18423 for {
18424 off := auxIntToInt32(v.AuxInt)
18425 sym := auxToSym(v.Aux)
18426 ptr := v_0
18427 if v_1.Op != OpAMD64FlagGT_ULT {
18428 break
18429 }
18430 mem := v_2
18431 v.reset(OpAMD64MOVBstore)
18432 v.AuxInt = int32ToAuxInt(off)
18433 v.Aux = symToAux(sym)
18434 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18435 v0.AuxInt = int32ToAuxInt(0)
18436 v.AddArg3(ptr, v0, mem)
18437 return true
18438 }
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18441 for {
18442 off := auxIntToInt32(v.AuxInt)
18443 sym := auxToSym(v.Aux)
18444 ptr := v_0
18445 if v_1.Op != OpAMD64FlagGT_UGT {
18446 break
18447 }
18448 mem := v_2
18449 v.reset(OpAMD64MOVBstore)
18450 v.AuxInt = int32ToAuxInt(off)
18451 v.Aux = symToAux(sym)
18452 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18453 v0.AuxInt = int32ToAuxInt(0)
18454 v.AddArg3(ptr, v0, mem)
18455 return true
18456 }
18457 return false
18458 }
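// Signed conditions carry no extra structure to exploit, so SETG and
// friends only mirror through InvertFlags and fold known flags. In the
// Flag* constants the LT/GT/EQ part encodes the signed ordering and the
// ULT/UGT suffix the unsigned one; SETG consults just the signed part.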
18459 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
18460 v_0 := v.Args[0]
	// match: (SETG (InvertFlags x))
	// result: (SETL x)
18463 for {
18464 if v_0.Op != OpAMD64InvertFlags {
18465 break
18466 }
18467 x := v_0.Args[0]
18468 v.reset(OpAMD64SETL)
18469 v.AddArg(x)
18470 return true
18471 }
	// match: (SETG (FlagEQ))
	// result: (MOVLconst [0])
18474 for {
18475 if v_0.Op != OpAMD64FlagEQ {
18476 break
18477 }
18478 v.reset(OpAMD64MOVLconst)
18479 v.AuxInt = int32ToAuxInt(0)
18480 return true
18481 }
	// match: (SETG (FlagLT_ULT))
	// result: (MOVLconst [0])
18484 for {
18485 if v_0.Op != OpAMD64FlagLT_ULT {
18486 break
18487 }
18488 v.reset(OpAMD64MOVLconst)
18489 v.AuxInt = int32ToAuxInt(0)
18490 return true
18491 }
	// match: (SETG (FlagLT_UGT))
	// result: (MOVLconst [0])
18494 for {
18495 if v_0.Op != OpAMD64FlagLT_UGT {
18496 break
18497 }
18498 v.reset(OpAMD64MOVLconst)
18499 v.AuxInt = int32ToAuxInt(0)
18500 return true
18501 }
	// match: (SETG (FlagGT_ULT))
	// result: (MOVLconst [1])
18504 for {
18505 if v_0.Op != OpAMD64FlagGT_ULT {
18506 break
18507 }
18508 v.reset(OpAMD64MOVLconst)
18509 v.AuxInt = int32ToAuxInt(1)
18510 return true
18511 }
	// match: (SETG (FlagGT_UGT))
	// result: (MOVLconst [1])
18514 for {
18515 if v_0.Op != OpAMD64FlagGT_UGT {
18516 break
18517 }
18518 v.reset(OpAMD64MOVLconst)
18519 v.AuxInt = int32ToAuxInt(1)
18520 return true
18521 }
18522 return false
18523 }
18524 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
18525 v_0 := v.Args[0]
	// match: (SETGE (InvertFlags x))
	// result: (SETLE x)
18528 for {
18529 if v_0.Op != OpAMD64InvertFlags {
18530 break
18531 }
18532 x := v_0.Args[0]
18533 v.reset(OpAMD64SETLE)
18534 v.AddArg(x)
18535 return true
18536 }
	// match: (SETGE (FlagEQ))
	// result: (MOVLconst [1])
18539 for {
18540 if v_0.Op != OpAMD64FlagEQ {
18541 break
18542 }
18543 v.reset(OpAMD64MOVLconst)
18544 v.AuxInt = int32ToAuxInt(1)
18545 return true
18546 }
	// match: (SETGE (FlagLT_ULT))
	// result: (MOVLconst [0])
18549 for {
18550 if v_0.Op != OpAMD64FlagLT_ULT {
18551 break
18552 }
18553 v.reset(OpAMD64MOVLconst)
18554 v.AuxInt = int32ToAuxInt(0)
18555 return true
18556 }
	// match: (SETGE (FlagLT_UGT))
	// result: (MOVLconst [0])
18559 for {
18560 if v_0.Op != OpAMD64FlagLT_UGT {
18561 break
18562 }
18563 v.reset(OpAMD64MOVLconst)
18564 v.AuxInt = int32ToAuxInt(0)
18565 return true
18566 }
	// match: (SETGE (FlagGT_ULT))
	// result: (MOVLconst [1])
18569 for {
18570 if v_0.Op != OpAMD64FlagGT_ULT {
18571 break
18572 }
18573 v.reset(OpAMD64MOVLconst)
18574 v.AuxInt = int32ToAuxInt(1)
18575 return true
18576 }
	// match: (SETGE (FlagGT_UGT))
	// result: (MOVLconst [1])
18579 for {
18580 if v_0.Op != OpAMD64FlagGT_UGT {
18581 break
18582 }
18583 v.reset(OpAMD64MOVLconst)
18584 v.AuxInt = int32ToAuxInt(1)
18585 return true
18586 }
18587 return false
18588 }
18589 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
18590 v_2 := v.Args[2]
18591 v_1 := v.Args[1]
18592 v_0 := v.Args[0]
18593 b := v.Block
18594 typ := &b.Func.Config.Types
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLEstore [off] {sym} ptr x mem)
18597 for {
18598 off := auxIntToInt32(v.AuxInt)
18599 sym := auxToSym(v.Aux)
18600 ptr := v_0
18601 if v_1.Op != OpAMD64InvertFlags {
18602 break
18603 }
18604 x := v_1.Args[0]
18605 mem := v_2
18606 v.reset(OpAMD64SETLEstore)
18607 v.AuxInt = int32ToAuxInt(off)
18608 v.Aux = symToAux(sym)
18609 v.AddArg3(ptr, x, mem)
18610 return true
18611 }
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGEstore [off1+off2] {sym} base val mem)
18615 for {
18616 off1 := auxIntToInt32(v.AuxInt)
18617 sym := auxToSym(v.Aux)
18618 if v_0.Op != OpAMD64ADDQconst {
18619 break
18620 }
18621 off2 := auxIntToInt32(v_0.AuxInt)
18622 base := v_0.Args[0]
18623 val := v_1
18624 mem := v_2
18625 if !(is32Bit(int64(off1) + int64(off2))) {
18626 break
18627 }
18628 v.reset(OpAMD64SETGEstore)
18629 v.AuxInt = int32ToAuxInt(off1 + off2)
18630 v.Aux = symToAux(sym)
18631 v.AddArg3(base, val, mem)
18632 return true
18633 }
	// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
18637 for {
18638 off1 := auxIntToInt32(v.AuxInt)
18639 sym1 := auxToSym(v.Aux)
18640 if v_0.Op != OpAMD64LEAQ {
18641 break
18642 }
18643 off2 := auxIntToInt32(v_0.AuxInt)
18644 sym2 := auxToSym(v_0.Aux)
18645 base := v_0.Args[0]
18646 val := v_1
18647 mem := v_2
18648 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18649 break
18650 }
18651 v.reset(OpAMD64SETGEstore)
18652 v.AuxInt = int32ToAuxInt(off1 + off2)
18653 v.Aux = symToAux(mergeSym(sym1, sym2))
18654 v.AddArg3(base, val, mem)
18655 return true
18656 }
	// match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
18659 for {
18660 off := auxIntToInt32(v.AuxInt)
18661 sym := auxToSym(v.Aux)
18662 ptr := v_0
18663 if v_1.Op != OpAMD64FlagEQ {
18664 break
18665 }
18666 mem := v_2
18667 v.reset(OpAMD64MOVBstore)
18668 v.AuxInt = int32ToAuxInt(off)
18669 v.Aux = symToAux(sym)
18670 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18671 v0.AuxInt = int32ToAuxInt(1)
18672 v.AddArg3(ptr, v0, mem)
18673 return true
18674 }
	// match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18677 for {
18678 off := auxIntToInt32(v.AuxInt)
18679 sym := auxToSym(v.Aux)
18680 ptr := v_0
18681 if v_1.Op != OpAMD64FlagLT_ULT {
18682 break
18683 }
18684 mem := v_2
18685 v.reset(OpAMD64MOVBstore)
18686 v.AuxInt = int32ToAuxInt(off)
18687 v.Aux = symToAux(sym)
18688 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18689 v0.AuxInt = int32ToAuxInt(0)
18690 v.AddArg3(ptr, v0, mem)
18691 return true
18692 }
	// match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18695 for {
18696 off := auxIntToInt32(v.AuxInt)
18697 sym := auxToSym(v.Aux)
18698 ptr := v_0
18699 if v_1.Op != OpAMD64FlagLT_UGT {
18700 break
18701 }
18702 mem := v_2
18703 v.reset(OpAMD64MOVBstore)
18704 v.AuxInt = int32ToAuxInt(off)
18705 v.Aux = symToAux(sym)
18706 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18707 v0.AuxInt = int32ToAuxInt(0)
18708 v.AddArg3(ptr, v0, mem)
18709 return true
18710 }
	// match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
18713 for {
18714 off := auxIntToInt32(v.AuxInt)
18715 sym := auxToSym(v.Aux)
18716 ptr := v_0
18717 if v_1.Op != OpAMD64FlagGT_ULT {
18718 break
18719 }
18720 mem := v_2
18721 v.reset(OpAMD64MOVBstore)
18722 v.AuxInt = int32ToAuxInt(off)
18723 v.Aux = symToAux(sym)
18724 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18725 v0.AuxInt = int32ToAuxInt(1)
18726 v.AddArg3(ptr, v0, mem)
18727 return true
18728 }
	// match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
18731 for {
18732 off := auxIntToInt32(v.AuxInt)
18733 sym := auxToSym(v.Aux)
18734 ptr := v_0
18735 if v_1.Op != OpAMD64FlagGT_UGT {
18736 break
18737 }
18738 mem := v_2
18739 v.reset(OpAMD64MOVBstore)
18740 v.AuxInt = int32ToAuxInt(off)
18741 v.Aux = symToAux(sym)
18742 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18743 v0.AuxInt = int32ToAuxInt(1)
18744 v.AddArg3(ptr, v0, mem)
18745 return true
18746 }
18747 return false
18748 }
18749 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
18750 v_2 := v.Args[2]
18751 v_1 := v.Args[1]
18752 v_0 := v.Args[0]
18753 b := v.Block
18754 typ := &b.Func.Config.Types
	// match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLstore [off] {sym} ptr x mem)
18757 for {
18758 off := auxIntToInt32(v.AuxInt)
18759 sym := auxToSym(v.Aux)
18760 ptr := v_0
18761 if v_1.Op != OpAMD64InvertFlags {
18762 break
18763 }
18764 x := v_1.Args[0]
18765 mem := v_2
18766 v.reset(OpAMD64SETLstore)
18767 v.AuxInt = int32ToAuxInt(off)
18768 v.Aux = symToAux(sym)
18769 v.AddArg3(ptr, x, mem)
18770 return true
18771 }
	// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGstore [off1+off2] {sym} base val mem)
18775 for {
18776 off1 := auxIntToInt32(v.AuxInt)
18777 sym := auxToSym(v.Aux)
18778 if v_0.Op != OpAMD64ADDQconst {
18779 break
18780 }
18781 off2 := auxIntToInt32(v_0.AuxInt)
18782 base := v_0.Args[0]
18783 val := v_1
18784 mem := v_2
18785 if !(is32Bit(int64(off1) + int64(off2))) {
18786 break
18787 }
18788 v.reset(OpAMD64SETGstore)
18789 v.AuxInt = int32ToAuxInt(off1 + off2)
18790 v.Aux = symToAux(sym)
18791 v.AddArg3(base, val, mem)
18792 return true
18793 }
	// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
18797 for {
18798 off1 := auxIntToInt32(v.AuxInt)
18799 sym1 := auxToSym(v.Aux)
18800 if v_0.Op != OpAMD64LEAQ {
18801 break
18802 }
18803 off2 := auxIntToInt32(v_0.AuxInt)
18804 sym2 := auxToSym(v_0.Aux)
18805 base := v_0.Args[0]
18806 val := v_1
18807 mem := v_2
18808 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18809 break
18810 }
18811 v.reset(OpAMD64SETGstore)
18812 v.AuxInt = int32ToAuxInt(off1 + off2)
18813 v.Aux = symToAux(mergeSym(sym1, sym2))
18814 v.AddArg3(base, val, mem)
18815 return true
18816 }
	// match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18819 for {
18820 off := auxIntToInt32(v.AuxInt)
18821 sym := auxToSym(v.Aux)
18822 ptr := v_0
18823 if v_1.Op != OpAMD64FlagEQ {
18824 break
18825 }
18826 mem := v_2
18827 v.reset(OpAMD64MOVBstore)
18828 v.AuxInt = int32ToAuxInt(off)
18829 v.Aux = symToAux(sym)
18830 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18831 v0.AuxInt = int32ToAuxInt(0)
18832 v.AddArg3(ptr, v0, mem)
18833 return true
18834 }
	// match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18837 for {
18838 off := auxIntToInt32(v.AuxInt)
18839 sym := auxToSym(v.Aux)
18840 ptr := v_0
18841 if v_1.Op != OpAMD64FlagLT_ULT {
18842 break
18843 }
18844 mem := v_2
18845 v.reset(OpAMD64MOVBstore)
18846 v.AuxInt = int32ToAuxInt(off)
18847 v.Aux = symToAux(sym)
18848 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18849 v0.AuxInt = int32ToAuxInt(0)
18850 v.AddArg3(ptr, v0, mem)
18851 return true
18852 }
	// match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
18855 for {
18856 off := auxIntToInt32(v.AuxInt)
18857 sym := auxToSym(v.Aux)
18858 ptr := v_0
18859 if v_1.Op != OpAMD64FlagLT_UGT {
18860 break
18861 }
18862 mem := v_2
18863 v.reset(OpAMD64MOVBstore)
18864 v.AuxInt = int32ToAuxInt(off)
18865 v.Aux = symToAux(sym)
18866 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18867 v0.AuxInt = int32ToAuxInt(0)
18868 v.AddArg3(ptr, v0, mem)
18869 return true
18870 }
	// match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
18873 for {
18874 off := auxIntToInt32(v.AuxInt)
18875 sym := auxToSym(v.Aux)
18876 ptr := v_0
18877 if v_1.Op != OpAMD64FlagGT_ULT {
18878 break
18879 }
18880 mem := v_2
18881 v.reset(OpAMD64MOVBstore)
18882 v.AuxInt = int32ToAuxInt(off)
18883 v.Aux = symToAux(sym)
18884 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18885 v0.AuxInt = int32ToAuxInt(1)
18886 v.AddArg3(ptr, v0, mem)
18887 return true
18888 }
	// match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
18891 for {
18892 off := auxIntToInt32(v.AuxInt)
18893 sym := auxToSym(v.Aux)
18894 ptr := v_0
18895 if v_1.Op != OpAMD64FlagGT_UGT {
18896 break
18897 }
18898 mem := v_2
18899 v.reset(OpAMD64MOVBstore)
18900 v.AuxInt = int32ToAuxInt(off)
18901 v.Aux = symToAux(sym)
18902 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18903 v0.AuxInt = int32ToAuxInt(1)
18904 v.AddArg3(ptr, v0, mem)
18905 return true
18906 }
18907 return false
18908 }
18909 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
18910 v_0 := v.Args[0]
// match: (SETL (InvertFlags x))
// result: (SETG x)
18913 for {
18914 if v_0.Op != OpAMD64InvertFlags {
18915 break
18916 }
18917 x := v_0.Args[0]
18918 v.reset(OpAMD64SETG)
18919 v.AddArg(x)
18920 return true
18921 }
// match: (SETL (FlagEQ))
// result: (MOVLconst [0])
18924 for {
18925 if v_0.Op != OpAMD64FlagEQ {
18926 break
18927 }
18928 v.reset(OpAMD64MOVLconst)
18929 v.AuxInt = int32ToAuxInt(0)
18930 return true
18931 }
// match: (SETL (FlagLT_ULT))
// result: (MOVLconst [1])
18934 for {
18935 if v_0.Op != OpAMD64FlagLT_ULT {
18936 break
18937 }
18938 v.reset(OpAMD64MOVLconst)
18939 v.AuxInt = int32ToAuxInt(1)
18940 return true
18941 }
// match: (SETL (FlagLT_UGT))
// result: (MOVLconst [1])
18944 for {
18945 if v_0.Op != OpAMD64FlagLT_UGT {
18946 break
18947 }
18948 v.reset(OpAMD64MOVLconst)
18949 v.AuxInt = int32ToAuxInt(1)
18950 return true
18951 }
// match: (SETL (FlagGT_ULT))
// result: (MOVLconst [0])
18954 for {
18955 if v_0.Op != OpAMD64FlagGT_ULT {
18956 break
18957 }
18958 v.reset(OpAMD64MOVLconst)
18959 v.AuxInt = int32ToAuxInt(0)
18960 return true
18961 }
// match: (SETL (FlagGT_UGT))
// result: (MOVLconst [0])
18964 for {
18965 if v_0.Op != OpAMD64FlagGT_UGT {
18966 break
18967 }
18968 v.reset(OpAMD64MOVLconst)
18969 v.AuxInt = int32ToAuxInt(0)
18970 return true
18971 }
18972 return false
18973 }
18974 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
18975 v_0 := v.Args[0]
// match: (SETLE (InvertFlags x))
// result: (SETGE x)
18978 for {
18979 if v_0.Op != OpAMD64InvertFlags {
18980 break
18981 }
18982 x := v_0.Args[0]
18983 v.reset(OpAMD64SETGE)
18984 v.AddArg(x)
18985 return true
18986 }
// match: (SETLE (FlagEQ))
// result: (MOVLconst [1])
18989 for {
18990 if v_0.Op != OpAMD64FlagEQ {
18991 break
18992 }
18993 v.reset(OpAMD64MOVLconst)
18994 v.AuxInt = int32ToAuxInt(1)
18995 return true
18996 }
// match: (SETLE (FlagLT_ULT))
// result: (MOVLconst [1])
18999 for {
19000 if v_0.Op != OpAMD64FlagLT_ULT {
19001 break
19002 }
19003 v.reset(OpAMD64MOVLconst)
19004 v.AuxInt = int32ToAuxInt(1)
19005 return true
19006 }
// match: (SETLE (FlagLT_UGT))
// result: (MOVLconst [1])
19009 for {
19010 if v_0.Op != OpAMD64FlagLT_UGT {
19011 break
19012 }
19013 v.reset(OpAMD64MOVLconst)
19014 v.AuxInt = int32ToAuxInt(1)
19015 return true
19016 }
// match: (SETLE (FlagGT_ULT))
// result: (MOVLconst [0])
19019 for {
19020 if v_0.Op != OpAMD64FlagGT_ULT {
19021 break
19022 }
19023 v.reset(OpAMD64MOVLconst)
19024 v.AuxInt = int32ToAuxInt(0)
19025 return true
19026 }
// match: (SETLE (FlagGT_UGT))
// result: (MOVLconst [0])
19029 for {
19030 if v_0.Op != OpAMD64FlagGT_UGT {
19031 break
19032 }
19033 v.reset(OpAMD64MOVLconst)
19034 v.AuxInt = int32ToAuxInt(0)
19035 return true
19036 }
19037 return false
19038 }
19039 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
19040 v_2 := v.Args[2]
19041 v_1 := v.Args[1]
19042 v_0 := v.Args[0]
19043 b := v.Block
19044 typ := &b.Func.Config.Types
// match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
// result: (SETGEstore [off] {sym} ptr x mem)
19047 for {
19048 off := auxIntToInt32(v.AuxInt)
19049 sym := auxToSym(v.Aux)
19050 ptr := v_0
19051 if v_1.Op != OpAMD64InvertFlags {
19052 break
19053 }
19054 x := v_1.Args[0]
19055 mem := v_2
19056 v.reset(OpAMD64SETGEstore)
19057 v.AuxInt = int32ToAuxInt(off)
19058 v.Aux = symToAux(sym)
19059 v.AddArg3(ptr, x, mem)
19060 return true
19061 }
// match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SETLEstore [off1+off2] {sym} base val mem)
19065 for {
19066 off1 := auxIntToInt32(v.AuxInt)
19067 sym := auxToSym(v.Aux)
19068 if v_0.Op != OpAMD64ADDQconst {
19069 break
19070 }
19071 off2 := auxIntToInt32(v_0.AuxInt)
19072 base := v_0.Args[0]
19073 val := v_1
19074 mem := v_2
19075 if !(is32Bit(int64(off1) + int64(off2))) {
19076 break
19077 }
19078 v.reset(OpAMD64SETLEstore)
19079 v.AuxInt = int32ToAuxInt(off1 + off2)
19080 v.Aux = symToAux(sym)
19081 v.AddArg3(base, val, mem)
19082 return true
19083 }
// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
19087 for {
19088 off1 := auxIntToInt32(v.AuxInt)
19089 sym1 := auxToSym(v.Aux)
19090 if v_0.Op != OpAMD64LEAQ {
19091 break
19092 }
19093 off2 := auxIntToInt32(v_0.AuxInt)
19094 sym2 := auxToSym(v_0.Aux)
19095 base := v_0.Args[0]
19096 val := v_1
19097 mem := v_2
19098 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19099 break
19100 }
19101 v.reset(OpAMD64SETLEstore)
19102 v.AuxInt = int32ToAuxInt(off1 + off2)
19103 v.Aux = symToAux(mergeSym(sym1, sym2))
19104 v.AddArg3(base, val, mem)
19105 return true
19106 }
// match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
19109 for {
19110 off := auxIntToInt32(v.AuxInt)
19111 sym := auxToSym(v.Aux)
19112 ptr := v_0
19113 if v_1.Op != OpAMD64FlagEQ {
19114 break
19115 }
19116 mem := v_2
19117 v.reset(OpAMD64MOVBstore)
19118 v.AuxInt = int32ToAuxInt(off)
19119 v.Aux = symToAux(sym)
19120 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19121 v0.AuxInt = int32ToAuxInt(1)
19122 v.AddArg3(ptr, v0, mem)
19123 return true
19124 }
// match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
19127 for {
19128 off := auxIntToInt32(v.AuxInt)
19129 sym := auxToSym(v.Aux)
19130 ptr := v_0
19131 if v_1.Op != OpAMD64FlagLT_ULT {
19132 break
19133 }
19134 mem := v_2
19135 v.reset(OpAMD64MOVBstore)
19136 v.AuxInt = int32ToAuxInt(off)
19137 v.Aux = symToAux(sym)
19138 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19139 v0.AuxInt = int32ToAuxInt(1)
19140 v.AddArg3(ptr, v0, mem)
19141 return true
19142 }
// match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
19145 for {
19146 off := auxIntToInt32(v.AuxInt)
19147 sym := auxToSym(v.Aux)
19148 ptr := v_0
19149 if v_1.Op != OpAMD64FlagLT_UGT {
19150 break
19151 }
19152 mem := v_2
19153 v.reset(OpAMD64MOVBstore)
19154 v.AuxInt = int32ToAuxInt(off)
19155 v.Aux = symToAux(sym)
19156 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19157 v0.AuxInt = int32ToAuxInt(1)
19158 v.AddArg3(ptr, v0, mem)
19159 return true
19160 }
// match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
19163 for {
19164 off := auxIntToInt32(v.AuxInt)
19165 sym := auxToSym(v.Aux)
19166 ptr := v_0
19167 if v_1.Op != OpAMD64FlagGT_ULT {
19168 break
19169 }
19170 mem := v_2
19171 v.reset(OpAMD64MOVBstore)
19172 v.AuxInt = int32ToAuxInt(off)
19173 v.Aux = symToAux(sym)
19174 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19175 v0.AuxInt = int32ToAuxInt(0)
19176 v.AddArg3(ptr, v0, mem)
19177 return true
19178 }
// match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
19181 for {
19182 off := auxIntToInt32(v.AuxInt)
19183 sym := auxToSym(v.Aux)
19184 ptr := v_0
19185 if v_1.Op != OpAMD64FlagGT_UGT {
19186 break
19187 }
19188 mem := v_2
19189 v.reset(OpAMD64MOVBstore)
19190 v.AuxInt = int32ToAuxInt(off)
19191 v.Aux = symToAux(sym)
19192 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19193 v0.AuxInt = int32ToAuxInt(0)
19194 v.AddArg3(ptr, v0, mem)
19195 return true
19196 }
19197 return false
19198 }
19199 func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
19200 v_2 := v.Args[2]
19201 v_1 := v.Args[1]
19202 v_0 := v.Args[0]
19203 b := v.Block
19204 typ := &b.Func.Config.Types
// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
// result: (SETGstore [off] {sym} ptr x mem)
19207 for {
19208 off := auxIntToInt32(v.AuxInt)
19209 sym := auxToSym(v.Aux)
19210 ptr := v_0
19211 if v_1.Op != OpAMD64InvertFlags {
19212 break
19213 }
19214 x := v_1.Args[0]
19215 mem := v_2
19216 v.reset(OpAMD64SETGstore)
19217 v.AuxInt = int32ToAuxInt(off)
19218 v.Aux = symToAux(sym)
19219 v.AddArg3(ptr, x, mem)
19220 return true
19221 }
// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SETLstore [off1+off2] {sym} base val mem)
19225 for {
19226 off1 := auxIntToInt32(v.AuxInt)
19227 sym := auxToSym(v.Aux)
19228 if v_0.Op != OpAMD64ADDQconst {
19229 break
19230 }
19231 off2 := auxIntToInt32(v_0.AuxInt)
19232 base := v_0.Args[0]
19233 val := v_1
19234 mem := v_2
19235 if !(is32Bit(int64(off1) + int64(off2))) {
19236 break
19237 }
19238 v.reset(OpAMD64SETLstore)
19239 v.AuxInt = int32ToAuxInt(off1 + off2)
19240 v.Aux = symToAux(sym)
19241 v.AddArg3(base, val, mem)
19242 return true
19243 }
// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
19247 for {
19248 off1 := auxIntToInt32(v.AuxInt)
19249 sym1 := auxToSym(v.Aux)
19250 if v_0.Op != OpAMD64LEAQ {
19251 break
19252 }
19253 off2 := auxIntToInt32(v_0.AuxInt)
19254 sym2 := auxToSym(v_0.Aux)
19255 base := v_0.Args[0]
19256 val := v_1
19257 mem := v_2
19258 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19259 break
19260 }
19261 v.reset(OpAMD64SETLstore)
19262 v.AuxInt = int32ToAuxInt(off1 + off2)
19263 v.Aux = symToAux(mergeSym(sym1, sym2))
19264 v.AddArg3(base, val, mem)
19265 return true
19266 }
// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
19269 for {
19270 off := auxIntToInt32(v.AuxInt)
19271 sym := auxToSym(v.Aux)
19272 ptr := v_0
19273 if v_1.Op != OpAMD64FlagEQ {
19274 break
19275 }
19276 mem := v_2
19277 v.reset(OpAMD64MOVBstore)
19278 v.AuxInt = int32ToAuxInt(off)
19279 v.Aux = symToAux(sym)
19280 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19281 v0.AuxInt = int32ToAuxInt(0)
19282 v.AddArg3(ptr, v0, mem)
19283 return true
19284 }
// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
19287 for {
19288 off := auxIntToInt32(v.AuxInt)
19289 sym := auxToSym(v.Aux)
19290 ptr := v_0
19291 if v_1.Op != OpAMD64FlagLT_ULT {
19292 break
19293 }
19294 mem := v_2
19295 v.reset(OpAMD64MOVBstore)
19296 v.AuxInt = int32ToAuxInt(off)
19297 v.Aux = symToAux(sym)
19298 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19299 v0.AuxInt = int32ToAuxInt(1)
19300 v.AddArg3(ptr, v0, mem)
19301 return true
19302 }
// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
19305 for {
19306 off := auxIntToInt32(v.AuxInt)
19307 sym := auxToSym(v.Aux)
19308 ptr := v_0
19309 if v_1.Op != OpAMD64FlagLT_UGT {
19310 break
19311 }
19312 mem := v_2
19313 v.reset(OpAMD64MOVBstore)
19314 v.AuxInt = int32ToAuxInt(off)
19315 v.Aux = symToAux(sym)
19316 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19317 v0.AuxInt = int32ToAuxInt(1)
19318 v.AddArg3(ptr, v0, mem)
19319 return true
19320 }
// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
19323 for {
19324 off := auxIntToInt32(v.AuxInt)
19325 sym := auxToSym(v.Aux)
19326 ptr := v_0
19327 if v_1.Op != OpAMD64FlagGT_ULT {
19328 break
19329 }
19330 mem := v_2
19331 v.reset(OpAMD64MOVBstore)
19332 v.AuxInt = int32ToAuxInt(off)
19333 v.Aux = symToAux(sym)
19334 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19335 v0.AuxInt = int32ToAuxInt(0)
19336 v.AddArg3(ptr, v0, mem)
19337 return true
19338 }
// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
19341 for {
19342 off := auxIntToInt32(v.AuxInt)
19343 sym := auxToSym(v.Aux)
19344 ptr := v_0
19345 if v_1.Op != OpAMD64FlagGT_UGT {
19346 break
19347 }
19348 mem := v_2
19349 v.reset(OpAMD64MOVBstore)
19350 v.AuxInt = int32ToAuxInt(off)
19351 v.Aux = symToAux(sym)
19352 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19353 v0.AuxInt = int32ToAuxInt(0)
19354 v.AddArg3(ptr, v0, mem)
19355 return true
19356 }
19357 return false
19358 }
19359 func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
19360 v_0 := v.Args[0]
19361 b := v.Block
// match: (SETNE (TESTBconst [1] x))
// result: (ANDLconst [1] x)
19364 for {
19365 if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
19366 break
19367 }
19368 x := v_0.Args[0]
19369 v.reset(OpAMD64ANDLconst)
19370 v.AuxInt = int32ToAuxInt(1)
19371 v.AddArg(x)
19372 return true
19373 }
// match: (SETNE (TESTWconst [1] x))
// result: (ANDLconst [1] x)
19376 for {
19377 if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
19378 break
19379 }
19380 x := v_0.Args[0]
19381 v.reset(OpAMD64ANDLconst)
19382 v.AuxInt = int32ToAuxInt(1)
19383 v.AddArg(x)
19384 return true
19385 }
// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
// result: (SETB (BTL x y))
19388 for {
19389 if v_0.Op != OpAMD64TESTL {
19390 break
19391 }
19392 _ = v_0.Args[1]
19393 v_0_0 := v_0.Args[0]
19394 v_0_1 := v_0.Args[1]
19395 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19396 if v_0_0.Op != OpAMD64SHLL {
19397 continue
19398 }
19399 x := v_0_0.Args[1]
19400 v_0_0_0 := v_0_0.Args[0]
19401 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
19402 continue
19403 }
19404 y := v_0_1
19405 v.reset(OpAMD64SETB)
19406 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
19407 v0.AddArg2(x, y)
19408 v.AddArg(v0)
19409 return true
19410 }
19411 break
19412 }
// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
// result: (SETB (BTQ x y))
19415 for {
19416 if v_0.Op != OpAMD64TESTQ {
19417 break
19418 }
19419 _ = v_0.Args[1]
19420 v_0_0 := v_0.Args[0]
19421 v_0_1 := v_0.Args[1]
19422 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19423 if v_0_0.Op != OpAMD64SHLQ {
19424 continue
19425 }
19426 x := v_0_0.Args[1]
19427 v_0_0_0 := v_0_0.Args[0]
19428 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
19429 continue
19430 }
19431 y := v_0_1
19432 v.reset(OpAMD64SETB)
19433 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
19434 v0.AddArg2(x, y)
19435 v.AddArg(v0)
19436 return true
19437 }
19438 break
19439 }
// match: (SETNE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETB (BTLconst [int8(log32(c))] x))
19443 for {
19444 if v_0.Op != OpAMD64TESTLconst {
19445 break
19446 }
19447 c := auxIntToInt32(v_0.AuxInt)
19448 x := v_0.Args[0]
19449 if !(isUint32PowerOfTwo(int64(c))) {
19450 break
19451 }
19452 v.reset(OpAMD64SETB)
19453 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19454 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19455 v0.AddArg(x)
19456 v.AddArg(v0)
19457 return true
19458 }
// match: (SETNE (TESTQconst [c] x))
// cond: isUint64PowerOfTwo(int64(c))
// result: (SETB (BTQconst [int8(log32(c))] x))
19462 for {
19463 if v_0.Op != OpAMD64TESTQconst {
19464 break
19465 }
19466 c := auxIntToInt32(v_0.AuxInt)
19467 x := v_0.Args[0]
19468 if !(isUint64PowerOfTwo(int64(c))) {
19469 break
19470 }
19471 v.reset(OpAMD64SETB)
19472 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19473 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19474 v0.AddArg(x)
19475 v.AddArg(v0)
19476 return true
19477 }
// match: (SETNE (TESTQ (MOVQconst [c]) x))
// cond: isUint64PowerOfTwo(c)
// result: (SETB (BTQconst [int8(log64(c))] x))
19481 for {
19482 if v_0.Op != OpAMD64TESTQ {
19483 break
19484 }
19485 _ = v_0.Args[1]
19486 v_0_0 := v_0.Args[0]
19487 v_0_1 := v_0.Args[1]
19488 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19489 if v_0_0.Op != OpAMD64MOVQconst {
19490 continue
19491 }
19492 c := auxIntToInt64(v_0_0.AuxInt)
19493 x := v_0_1
19494 if !(isUint64PowerOfTwo(c)) {
19495 continue
19496 }
19497 v.reset(OpAMD64SETB)
19498 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19499 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
19500 v0.AddArg(x)
19501 v.AddArg(v0)
19502 return true
19503 }
19504 break
19505 }
// match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
// result: (SETEQ (CMPLconst [0] s))
19508 for {
19509 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
19510 break
19511 }
19512 s := v_0.Args[0]
19513 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
19514 break
19515 }
19516 v.reset(OpAMD64SETEQ)
19517 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
19518 v0.AuxInt = int32ToAuxInt(0)
19519 v0.AddArg(s)
19520 v.AddArg(v0)
19521 return true
19522 }
// match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
// result: (SETEQ (CMPQconst [0] s))
19525 for {
19526 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
19527 break
19528 }
19529 s := v_0.Args[0]
19530 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
19531 break
19532 }
19533 v.reset(OpAMD64SETEQ)
19534 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
19535 v0.AuxInt = int32ToAuxInt(0)
19536 v0.AddArg(s)
19537 v.AddArg(v0)
19538 return true
19539 }
// match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
// cond: z1==z2
// result: (SETB (BTQconst [63] x))
19543 for {
19544 if v_0.Op != OpAMD64TESTQ {
19545 break
19546 }
19547 _ = v_0.Args[1]
19548 v_0_0 := v_0.Args[0]
19549 v_0_1 := v_0.Args[1]
19550 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19551 z1 := v_0_0
19552 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
19553 continue
19554 }
19555 z1_0 := z1.Args[0]
19556 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
19557 continue
19558 }
19559 x := z1_0.Args[0]
19560 z2 := v_0_1
19561 if !(z1 == z2) {
19562 continue
19563 }
19564 v.reset(OpAMD64SETB)
19565 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19566 v0.AuxInt = int8ToAuxInt(63)
19567 v0.AddArg(x)
19568 v.AddArg(v0)
19569 return true
19570 }
19571 break
19572 }
// match: (SETNE (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2))
// cond: z1==z2
// result: (SETB (BTLconst [31] x))
19576 for {
19577 if v_0.Op != OpAMD64TESTL {
19578 break
19579 }
19580 _ = v_0.Args[1]
19581 v_0_0 := v_0.Args[0]
19582 v_0_1 := v_0.Args[1]
19583 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19584 z1 := v_0_0
19585 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
19586 continue
19587 }
19588 z1_0 := z1.Args[0]
if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
19590 continue
19591 }
19592 x := z1_0.Args[0]
19593 z2 := v_0_1
19594 if !(z1 == z2) {
19595 continue
19596 }
19597 v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19599 v0.AuxInt = int8ToAuxInt(31)
19600 v0.AddArg(x)
19601 v.AddArg(v0)
19602 return true
19603 }
19604 break
19605 }
// match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
// cond: z1==z2
// result: (SETB (BTQconst [0] x))
19609 for {
19610 if v_0.Op != OpAMD64TESTQ {
19611 break
19612 }
19613 _ = v_0.Args[1]
19614 v_0_0 := v_0.Args[0]
19615 v_0_1 := v_0.Args[1]
19616 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19617 z1 := v_0_0
19618 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
19619 continue
19620 }
19621 z1_0 := z1.Args[0]
19622 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
19623 continue
19624 }
19625 x := z1_0.Args[0]
19626 z2 := v_0_1
19627 if !(z1 == z2) {
19628 continue
19629 }
19630 v.reset(OpAMD64SETB)
19631 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19632 v0.AuxInt = int8ToAuxInt(0)
19633 v0.AddArg(x)
19634 v.AddArg(v0)
19635 return true
19636 }
19637 break
19638 }
// match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
// cond: z1==z2
// result: (SETB (BTLconst [0] x))
19642 for {
19643 if v_0.Op != OpAMD64TESTL {
19644 break
19645 }
19646 _ = v_0.Args[1]
19647 v_0_0 := v_0.Args[0]
19648 v_0_1 := v_0.Args[1]
19649 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19650 z1 := v_0_0
19651 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
19652 continue
19653 }
19654 z1_0 := z1.Args[0]
19655 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
19656 continue
19657 }
19658 x := z1_0.Args[0]
19659 z2 := v_0_1
19660 if !(z1 == z2) {
19661 continue
19662 }
19663 v.reset(OpAMD64SETB)
19664 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19665 v0.AuxInt = int8ToAuxInt(0)
19666 v0.AddArg(x)
19667 v.AddArg(v0)
19668 return true
19669 }
19670 break
19671 }
// match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
// cond: z1==z2
// result: (SETB (BTQconst [63] x))
19675 for {
19676 if v_0.Op != OpAMD64TESTQ {
19677 break
19678 }
19679 _ = v_0.Args[1]
19680 v_0_0 := v_0.Args[0]
19681 v_0_1 := v_0.Args[1]
19682 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19683 z1 := v_0_0
19684 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
19685 continue
19686 }
19687 x := z1.Args[0]
19688 z2 := v_0_1
19689 if !(z1 == z2) {
19690 continue
19691 }
19692 v.reset(OpAMD64SETB)
19693 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19694 v0.AuxInt = int8ToAuxInt(63)
19695 v0.AddArg(x)
19696 v.AddArg(v0)
19697 return true
19698 }
19699 break
19700 }
// match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
// cond: z1==z2
// result: (SETB (BTLconst [31] x))
19704 for {
19705 if v_0.Op != OpAMD64TESTL {
19706 break
19707 }
19708 _ = v_0.Args[1]
19709 v_0_0 := v_0.Args[0]
19710 v_0_1 := v_0.Args[1]
19711 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19712 z1 := v_0_0
19713 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
19714 continue
19715 }
19716 x := z1.Args[0]
19717 z2 := v_0_1
19718 if !(z1 == z2) {
19719 continue
19720 }
19721 v.reset(OpAMD64SETB)
19722 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19723 v0.AuxInt = int8ToAuxInt(31)
19724 v0.AddArg(x)
19725 v.AddArg(v0)
19726 return true
19727 }
19728 break
19729 }
// match: (SETNE (InvertFlags x))
// result: (SETNE x)
19732 for {
19733 if v_0.Op != OpAMD64InvertFlags {
19734 break
19735 }
19736 x := v_0.Args[0]
19737 v.reset(OpAMD64SETNE)
19738 v.AddArg(x)
19739 return true
19740 }
// match: (SETNE (FlagEQ))
// result: (MOVLconst [0])
19743 for {
19744 if v_0.Op != OpAMD64FlagEQ {
19745 break
19746 }
19747 v.reset(OpAMD64MOVLconst)
19748 v.AuxInt = int32ToAuxInt(0)
19749 return true
19750 }
// match: (SETNE (FlagLT_ULT))
// result: (MOVLconst [1])
19753 for {
19754 if v_0.Op != OpAMD64FlagLT_ULT {
19755 break
19756 }
19757 v.reset(OpAMD64MOVLconst)
19758 v.AuxInt = int32ToAuxInt(1)
19759 return true
19760 }
// match: (SETNE (FlagLT_UGT))
// result: (MOVLconst [1])
19763 for {
19764 if v_0.Op != OpAMD64FlagLT_UGT {
19765 break
19766 }
19767 v.reset(OpAMD64MOVLconst)
19768 v.AuxInt = int32ToAuxInt(1)
19769 return true
19770 }
// match: (SETNE (FlagGT_ULT))
// result: (MOVLconst [1])
19773 for {
19774 if v_0.Op != OpAMD64FlagGT_ULT {
19775 break
19776 }
19777 v.reset(OpAMD64MOVLconst)
19778 v.AuxInt = int32ToAuxInt(1)
19779 return true
19780 }
// match: (SETNE (FlagGT_UGT))
// result: (MOVLconst [1])
19783 for {
19784 if v_0.Op != OpAMD64FlagGT_UGT {
19785 break
19786 }
19787 v.reset(OpAMD64MOVLconst)
19788 v.AuxInt = int32ToAuxInt(1)
19789 return true
19790 }
// match: (SETNE (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
// result: (SETNE (Select1 <types.TypeFlags> blsr))
19793 for {
19794 if v_0.Op != OpAMD64TESTQ {
19795 break
19796 }
19797 _ = v_0.Args[1]
19798 v_0_0 := v_0.Args[0]
19799 v_0_1 := v_0.Args[1]
19800 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19801 s := v_0_0
19802 if s.Op != OpSelect0 {
19803 continue
19804 }
19805 blsr := s.Args[0]
19806 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
19807 continue
19808 }
19809 v.reset(OpAMD64SETNE)
19810 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
19811 v0.AddArg(blsr)
19812 v.AddArg(v0)
19813 return true
19814 }
19815 break
19816 }
// match: (SETNE (TESTL s:(Select0 blsr:(BLSRL _)) s))
// result: (SETNE (Select1 <types.TypeFlags> blsr))
19819 for {
19820 if v_0.Op != OpAMD64TESTL {
19821 break
19822 }
19823 _ = v_0.Args[1]
19824 v_0_0 := v_0.Args[0]
19825 v_0_1 := v_0.Args[1]
19826 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19827 s := v_0_0
19828 if s.Op != OpSelect0 {
19829 continue
19830 }
19831 blsr := s.Args[0]
19832 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
19833 continue
19834 }
19835 v.reset(OpAMD64SETNE)
19836 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
19837 v0.AddArg(blsr)
19838 v.AddArg(v0)
19839 return true
19840 }
19841 break
19842 }
19843 return false
19844 }
19845 func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
19846 v_2 := v.Args[2]
19847 v_1 := v.Args[1]
19848 v_0 := v.Args[0]
19849 b := v.Block
19850 typ := &b.Func.Config.Types
// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
19853 for {
19854 off := auxIntToInt32(v.AuxInt)
19855 sym := auxToSym(v.Aux)
19856 ptr := v_0
19857 if v_1.Op != OpAMD64TESTL {
19858 break
19859 }
19860 _ = v_1.Args[1]
19861 v_1_0 := v_1.Args[0]
19862 v_1_1 := v_1.Args[1]
19863 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19864 if v_1_0.Op != OpAMD64SHLL {
19865 continue
19866 }
19867 x := v_1_0.Args[1]
19868 v_1_0_0 := v_1_0.Args[0]
19869 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
19870 continue
19871 }
19872 y := v_1_1
19873 mem := v_2
19874 v.reset(OpAMD64SETBstore)
19875 v.AuxInt = int32ToAuxInt(off)
19876 v.Aux = symToAux(sym)
19877 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
19878 v0.AddArg2(x, y)
19879 v.AddArg3(ptr, v0, mem)
19880 return true
19881 }
19882 break
19883 }
// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
19886 for {
19887 off := auxIntToInt32(v.AuxInt)
19888 sym := auxToSym(v.Aux)
19889 ptr := v_0
19890 if v_1.Op != OpAMD64TESTQ {
19891 break
19892 }
19893 _ = v_1.Args[1]
19894 v_1_0 := v_1.Args[0]
19895 v_1_1 := v_1.Args[1]
19896 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19897 if v_1_0.Op != OpAMD64SHLQ {
19898 continue
19899 }
19900 x := v_1_0.Args[1]
19901 v_1_0_0 := v_1_0.Args[0]
19902 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
19903 continue
19904 }
19905 y := v_1_1
19906 mem := v_2
19907 v.reset(OpAMD64SETBstore)
19908 v.AuxInt = int32ToAuxInt(off)
19909 v.Aux = symToAux(sym)
19910 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
19911 v0.AddArg2(x, y)
19912 v.AddArg3(ptr, v0, mem)
19913 return true
19914 }
19915 break
19916 }
// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
19920 for {
19921 off := auxIntToInt32(v.AuxInt)
19922 sym := auxToSym(v.Aux)
19923 ptr := v_0
19924 if v_1.Op != OpAMD64TESTLconst {
19925 break
19926 }
19927 c := auxIntToInt32(v_1.AuxInt)
19928 x := v_1.Args[0]
19929 mem := v_2
19930 if !(isUint32PowerOfTwo(int64(c))) {
19931 break
19932 }
19933 v.reset(OpAMD64SETBstore)
19934 v.AuxInt = int32ToAuxInt(off)
19935 v.Aux = symToAux(sym)
19936 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19937 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19938 v0.AddArg(x)
19939 v.AddArg3(ptr, v0, mem)
19940 return true
19941 }
// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
// cond: isUint64PowerOfTwo(int64(c))
// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
19945 for {
19946 off := auxIntToInt32(v.AuxInt)
19947 sym := auxToSym(v.Aux)
19948 ptr := v_0
19949 if v_1.Op != OpAMD64TESTQconst {
19950 break
19951 }
19952 c := auxIntToInt32(v_1.AuxInt)
19953 x := v_1.Args[0]
19954 mem := v_2
19955 if !(isUint64PowerOfTwo(int64(c))) {
19956 break
19957 }
19958 v.reset(OpAMD64SETBstore)
19959 v.AuxInt = int32ToAuxInt(off)
19960 v.Aux = symToAux(sym)
19961 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19962 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19963 v0.AddArg(x)
19964 v.AddArg3(ptr, v0, mem)
19965 return true
19966 }
// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
// cond: isUint64PowerOfTwo(c)
// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
19970 for {
19971 off := auxIntToInt32(v.AuxInt)
19972 sym := auxToSym(v.Aux)
19973 ptr := v_0
19974 if v_1.Op != OpAMD64TESTQ {
19975 break
19976 }
19977 _ = v_1.Args[1]
19978 v_1_0 := v_1.Args[0]
19979 v_1_1 := v_1.Args[1]
19980 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19981 if v_1_0.Op != OpAMD64MOVQconst {
19982 continue
19983 }
19984 c := auxIntToInt64(v_1_0.AuxInt)
19985 x := v_1_1
19986 mem := v_2
19987 if !(isUint64PowerOfTwo(c)) {
19988 continue
19989 }
19990 v.reset(OpAMD64SETBstore)
19991 v.AuxInt = int32ToAuxInt(off)
19992 v.Aux = symToAux(sym)
19993 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19994 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
19995 v0.AddArg(x)
19996 v.AddArg3(ptr, v0, mem)
19997 return true
19998 }
19999 break
20000 }
// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
20003 for {
20004 off := auxIntToInt32(v.AuxInt)
20005 sym := auxToSym(v.Aux)
20006 ptr := v_0
20007 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
20008 break
20009 }
20010 s := v_1.Args[0]
20011 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
20012 break
20013 }
20014 mem := v_2
20015 v.reset(OpAMD64SETEQstore)
20016 v.AuxInt = int32ToAuxInt(off)
20017 v.Aux = symToAux(sym)
20018 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
20019 v0.AuxInt = int32ToAuxInt(0)
20020 v0.AddArg(s)
20021 v.AddArg3(ptr, v0, mem)
20022 return true
20023 }
// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
20026 for {
20027 off := auxIntToInt32(v.AuxInt)
20028 sym := auxToSym(v.Aux)
20029 ptr := v_0
20030 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
20031 break
20032 }
20033 s := v_1.Args[0]
20034 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
20035 break
20036 }
20037 mem := v_2
20038 v.reset(OpAMD64SETEQstore)
20039 v.AuxInt = int32ToAuxInt(off)
20040 v.Aux = symToAux(sym)
20041 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
20042 v0.AuxInt = int32ToAuxInt(0)
20043 v0.AddArg(s)
20044 v.AddArg3(ptr, v0, mem)
20045 return true
20046 }
// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
// cond: z1==z2
// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
20050 for {
20051 off := auxIntToInt32(v.AuxInt)
20052 sym := auxToSym(v.Aux)
20053 ptr := v_0
20054 if v_1.Op != OpAMD64TESTQ {
20055 break
20056 }
20057 _ = v_1.Args[1]
20058 v_1_0 := v_1.Args[0]
20059 v_1_1 := v_1.Args[1]
20060 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20061 z1 := v_1_0
20062 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
20063 continue
20064 }
20065 z1_0 := z1.Args[0]
20066 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
20067 continue
20068 }
20069 x := z1_0.Args[0]
20070 z2 := v_1_1
20071 mem := v_2
20072 if !(z1 == z2) {
20073 continue
20074 }
20075 v.reset(OpAMD64SETBstore)
20076 v.AuxInt = int32ToAuxInt(off)
20077 v.Aux = symToAux(sym)
20078 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20079 v0.AuxInt = int8ToAuxInt(63)
20080 v0.AddArg(x)
20081 v.AddArg3(ptr, v0, mem)
20082 return true
20083 }
20084 break
20085 }
// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
// cond: z1==z2
// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
20089 for {
20090 off := auxIntToInt32(v.AuxInt)
20091 sym := auxToSym(v.Aux)
20092 ptr := v_0
20093 if v_1.Op != OpAMD64TESTL {
20094 break
20095 }
20096 _ = v_1.Args[1]
20097 v_1_0 := v_1.Args[0]
20098 v_1_1 := v_1.Args[1]
20099 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20100 z1 := v_1_0
20101 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
20102 continue
20103 }
20104 z1_0 := z1.Args[0]
20105 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
20106 continue
20107 }
20108 x := z1_0.Args[0]
20109 z2 := v_1_1
20110 mem := v_2
20111 if !(z1 == z2) {
20112 continue
20113 }
20114 v.reset(OpAMD64SETBstore)
20115 v.AuxInt = int32ToAuxInt(off)
20116 v.Aux = symToAux(sym)
20117 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20118 v0.AuxInt = int8ToAuxInt(31)
20119 v0.AddArg(x)
20120 v.AddArg3(ptr, v0, mem)
20121 return true
20122 }
20123 break
20124 }
// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
// cond: z1==z2
// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
20128 for {
20129 off := auxIntToInt32(v.AuxInt)
20130 sym := auxToSym(v.Aux)
20131 ptr := v_0
20132 if v_1.Op != OpAMD64TESTQ {
20133 break
20134 }
20135 _ = v_1.Args[1]
20136 v_1_0 := v_1.Args[0]
20137 v_1_1 := v_1.Args[1]
20138 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20139 z1 := v_1_0
20140 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
20141 continue
20142 }
20143 z1_0 := z1.Args[0]
20144 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
20145 continue
20146 }
20147 x := z1_0.Args[0]
20148 z2 := v_1_1
20149 mem := v_2
20150 if !(z1 == z2) {
20151 continue
20152 }
20153 v.reset(OpAMD64SETBstore)
20154 v.AuxInt = int32ToAuxInt(off)
20155 v.Aux = symToAux(sym)
20156 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20157 v0.AuxInt = int8ToAuxInt(0)
20158 v0.AddArg(x)
20159 v.AddArg3(ptr, v0, mem)
20160 return true
20161 }
20162 break
20163 }
// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
// cond: z1==z2
// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
20167 for {
20168 off := auxIntToInt32(v.AuxInt)
20169 sym := auxToSym(v.Aux)
20170 ptr := v_0
20171 if v_1.Op != OpAMD64TESTL {
20172 break
20173 }
20174 _ = v_1.Args[1]
20175 v_1_0 := v_1.Args[0]
20176 v_1_1 := v_1.Args[1]
20177 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20178 z1 := v_1_0
20179 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
20180 continue
20181 }
20182 z1_0 := z1.Args[0]
20183 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
20184 continue
20185 }
20186 x := z1_0.Args[0]
20187 z2 := v_1_1
20188 mem := v_2
20189 if !(z1 == z2) {
20190 continue
20191 }
20192 v.reset(OpAMD64SETBstore)
20193 v.AuxInt = int32ToAuxInt(off)
20194 v.Aux = symToAux(sym)
20195 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20196 v0.AuxInt = int8ToAuxInt(0)
20197 v0.AddArg(x)
20198 v.AddArg3(ptr, v0, mem)
20199 return true
20200 }
20201 break
20202 }
// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
// cond: z1==z2
// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
20206 for {
20207 off := auxIntToInt32(v.AuxInt)
20208 sym := auxToSym(v.Aux)
20209 ptr := v_0
20210 if v_1.Op != OpAMD64TESTQ {
20211 break
20212 }
20213 _ = v_1.Args[1]
20214 v_1_0 := v_1.Args[0]
20215 v_1_1 := v_1.Args[1]
20216 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20217 z1 := v_1_0
20218 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
20219 continue
20220 }
20221 x := z1.Args[0]
20222 z2 := v_1_1
20223 mem := v_2
20224 if !(z1 == z2) {
20225 continue
20226 }
20227 v.reset(OpAMD64SETBstore)
20228 v.AuxInt = int32ToAuxInt(off)
20229 v.Aux = symToAux(sym)
20230 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20231 v0.AuxInt = int8ToAuxInt(63)
20232 v0.AddArg(x)
20233 v.AddArg3(ptr, v0, mem)
20234 return true
20235 }
20236 break
20237 }
// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
// cond: z1==z2
// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
20241 for {
20242 off := auxIntToInt32(v.AuxInt)
20243 sym := auxToSym(v.Aux)
20244 ptr := v_0
20245 if v_1.Op != OpAMD64TESTL {
20246 break
20247 }
20248 _ = v_1.Args[1]
20249 v_1_0 := v_1.Args[0]
20250 v_1_1 := v_1.Args[1]
20251 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20252 z1 := v_1_0
20253 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
20254 continue
20255 }
20256 x := z1.Args[0]
20257 z2 := v_1_1
20258 mem := v_2
20259 if !(z1 == z2) {
20260 continue
20261 }
20262 v.reset(OpAMD64SETBstore)
20263 v.AuxInt = int32ToAuxInt(off)
20264 v.Aux = symToAux(sym)
20265 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20266 v0.AuxInt = int8ToAuxInt(31)
20267 v0.AddArg(x)
20268 v.AddArg3(ptr, v0, mem)
20269 return true
20270 }
20271 break
20272 }
// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
// result: (SETNEstore [off] {sym} ptr x mem)
20275 for {
20276 off := auxIntToInt32(v.AuxInt)
20277 sym := auxToSym(v.Aux)
20278 ptr := v_0
20279 if v_1.Op != OpAMD64InvertFlags {
20280 break
20281 }
20282 x := v_1.Args[0]
20283 mem := v_2
20284 v.reset(OpAMD64SETNEstore)
20285 v.AuxInt = int32ToAuxInt(off)
20286 v.Aux = symToAux(sym)
20287 v.AddArg3(ptr, x, mem)
20288 return true
20289 }
// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SETNEstore [off1+off2] {sym} base val mem)
20293 for {
20294 off1 := auxIntToInt32(v.AuxInt)
20295 sym := auxToSym(v.Aux)
20296 if v_0.Op != OpAMD64ADDQconst {
20297 break
20298 }
20299 off2 := auxIntToInt32(v_0.AuxInt)
20300 base := v_0.Args[0]
20301 val := v_1
20302 mem := v_2
20303 if !(is32Bit(int64(off1) + int64(off2))) {
20304 break
20305 }
20306 v.reset(OpAMD64SETNEstore)
20307 v.AuxInt = int32ToAuxInt(off1 + off2)
20308 v.Aux = symToAux(sym)
20309 v.AddArg3(base, val, mem)
20310 return true
20311 }
// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
20315 for {
20316 off1 := auxIntToInt32(v.AuxInt)
20317 sym1 := auxToSym(v.Aux)
20318 if v_0.Op != OpAMD64LEAQ {
20319 break
20320 }
20321 off2 := auxIntToInt32(v_0.AuxInt)
20322 sym2 := auxToSym(v_0.Aux)
20323 base := v_0.Args[0]
20324 val := v_1
20325 mem := v_2
20326 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
20327 break
20328 }
20329 v.reset(OpAMD64SETNEstore)
20330 v.AuxInt = int32ToAuxInt(off1 + off2)
20331 v.Aux = symToAux(mergeSym(sym1, sym2))
20332 v.AddArg3(base, val, mem)
20333 return true
20334 }
// match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
20337 for {
20338 off := auxIntToInt32(v.AuxInt)
20339 sym := auxToSym(v.Aux)
20340 ptr := v_0
20341 if v_1.Op != OpAMD64FlagEQ {
20342 break
20343 }
20344 mem := v_2
20345 v.reset(OpAMD64MOVBstore)
20346 v.AuxInt = int32ToAuxInt(off)
20347 v.Aux = symToAux(sym)
20348 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20349 v0.AuxInt = int32ToAuxInt(0)
20350 v.AddArg3(ptr, v0, mem)
20351 return true
20352 }
// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
20355 for {
20356 off := auxIntToInt32(v.AuxInt)
20357 sym := auxToSym(v.Aux)
20358 ptr := v_0
20359 if v_1.Op != OpAMD64FlagLT_ULT {
20360 break
20361 }
20362 mem := v_2
20363 v.reset(OpAMD64MOVBstore)
20364 v.AuxInt = int32ToAuxInt(off)
20365 v.Aux = symToAux(sym)
20366 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20367 v0.AuxInt = int32ToAuxInt(1)
20368 v.AddArg3(ptr, v0, mem)
20369 return true
20370 }
// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
20373 for {
20374 off := auxIntToInt32(v.AuxInt)
20375 sym := auxToSym(v.Aux)
20376 ptr := v_0
20377 if v_1.Op != OpAMD64FlagLT_UGT {
20378 break
20379 }
20380 mem := v_2
20381 v.reset(OpAMD64MOVBstore)
20382 v.AuxInt = int32ToAuxInt(off)
20383 v.Aux = symToAux(sym)
20384 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20385 v0.AuxInt = int32ToAuxInt(1)
20386 v.AddArg3(ptr, v0, mem)
20387 return true
20388 }
// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
20391 for {
20392 off := auxIntToInt32(v.AuxInt)
20393 sym := auxToSym(v.Aux)
20394 ptr := v_0
20395 if v_1.Op != OpAMD64FlagGT_ULT {
20396 break
20397 }
20398 mem := v_2
20399 v.reset(OpAMD64MOVBstore)
20400 v.AuxInt = int32ToAuxInt(off)
20401 v.Aux = symToAux(sym)
20402 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20403 v0.AuxInt = int32ToAuxInt(1)
20404 v.AddArg3(ptr, v0, mem)
20405 return true
20406 }
// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
20409 for {
20410 off := auxIntToInt32(v.AuxInt)
20411 sym := auxToSym(v.Aux)
20412 ptr := v_0
20413 if v_1.Op != OpAMD64FlagGT_UGT {
20414 break
20415 }
20416 mem := v_2
20417 v.reset(OpAMD64MOVBstore)
20418 v.AuxInt = int32ToAuxInt(off)
20419 v.Aux = symToAux(sym)
20420 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20421 v0.AuxInt = int32ToAuxInt(1)
20422 v.AddArg3(ptr, v0, mem)
20423 return true
20424 }
20425 return false
20426 }
20427 func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
20428 v_1 := v.Args[1]
20429 v_0 := v.Args[0]
20430 b := v.Block
// match: (SHLL x (MOVQconst [c]))
// result: (SHLLconst [int8(c&31)] x)
20433 for {
20434 x := v_0
20435 if v_1.Op != OpAMD64MOVQconst {
20436 break
20437 }
20438 c := auxIntToInt64(v_1.AuxInt)
20439 v.reset(OpAMD64SHLLconst)
20440 v.AuxInt = int8ToAuxInt(int8(c & 31))
20441 v.AddArg(x)
20442 return true
20443 }
// match: (SHLL x (MOVLconst [c]))
// result: (SHLLconst [int8(c&31)] x)
20446 for {
20447 x := v_0
20448 if v_1.Op != OpAMD64MOVLconst {
20449 break
20450 }
20451 c := auxIntToInt32(v_1.AuxInt)
20452 v.reset(OpAMD64SHLLconst)
20453 v.AuxInt = int8ToAuxInt(int8(c & 31))
20454 v.AddArg(x)
20455 return true
20456 }
// match: (SHLL x (ADDQconst [c] y))
// cond: c & 31 == 0
// result: (SHLL x y)
20460 for {
20461 x := v_0
20462 if v_1.Op != OpAMD64ADDQconst {
20463 break
20464 }
20465 c := auxIntToInt32(v_1.AuxInt)
20466 y := v_1.Args[0]
20467 if !(c&31 == 0) {
20468 break
20469 }
20470 v.reset(OpAMD64SHLL)
20471 v.AddArg2(x, y)
20472 return true
20473 }
// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
// cond: c & 31 == 0
// result: (SHLL x (NEGQ <t> y))
20477 for {
20478 x := v_0
20479 if v_1.Op != OpAMD64NEGQ {
20480 break
20481 }
20482 t := v_1.Type
20483 v_1_0 := v_1.Args[0]
20484 if v_1_0.Op != OpAMD64ADDQconst {
20485 break
20486 }
20487 c := auxIntToInt32(v_1_0.AuxInt)
20488 y := v_1_0.Args[0]
20489 if !(c&31 == 0) {
20490 break
20491 }
20492 v.reset(OpAMD64SHLL)
20493 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20494 v0.AddArg(y)
20495 v.AddArg2(x, v0)
20496 return true
20497 }
// match: (SHLL x (ANDQconst [c] y))
// cond: c & 31 == 31
// result: (SHLL x y)
20501 for {
20502 x := v_0
20503 if v_1.Op != OpAMD64ANDQconst {
20504 break
20505 }
20506 c := auxIntToInt32(v_1.AuxInt)
20507 y := v_1.Args[0]
20508 if !(c&31 == 31) {
20509 break
20510 }
20511 v.reset(OpAMD64SHLL)
20512 v.AddArg2(x, y)
20513 return true
20514 }
// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
// cond: c & 31 == 31
// result: (SHLL x (NEGQ <t> y))
20518 for {
20519 x := v_0
20520 if v_1.Op != OpAMD64NEGQ {
20521 break
20522 }
20523 t := v_1.Type
20524 v_1_0 := v_1.Args[0]
20525 if v_1_0.Op != OpAMD64ANDQconst {
20526 break
20527 }
20528 c := auxIntToInt32(v_1_0.AuxInt)
20529 y := v_1_0.Args[0]
20530 if !(c&31 == 31) {
20531 break
20532 }
20533 v.reset(OpAMD64SHLL)
20534 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20535 v0.AddArg(y)
20536 v.AddArg2(x, v0)
20537 return true
20538 }
// match: (SHLL x (ADDLconst [c] y))
// cond: c & 31 == 0
// result: (SHLL x y)
20542 for {
20543 x := v_0
20544 if v_1.Op != OpAMD64ADDLconst {
20545 break
20546 }
20547 c := auxIntToInt32(v_1.AuxInt)
20548 y := v_1.Args[0]
20549 if !(c&31 == 0) {
20550 break
20551 }
20552 v.reset(OpAMD64SHLL)
20553 v.AddArg2(x, y)
20554 return true
20555 }
// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
// cond: c & 31 == 0
// result: (SHLL x (NEGL <t> y))
20559 for {
20560 x := v_0
20561 if v_1.Op != OpAMD64NEGL {
20562 break
20563 }
20564 t := v_1.Type
20565 v_1_0 := v_1.Args[0]
20566 if v_1_0.Op != OpAMD64ADDLconst {
20567 break
20568 }
20569 c := auxIntToInt32(v_1_0.AuxInt)
20570 y := v_1_0.Args[0]
20571 if !(c&31 == 0) {
20572 break
20573 }
20574 v.reset(OpAMD64SHLL)
20575 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20576 v0.AddArg(y)
20577 v.AddArg2(x, v0)
20578 return true
20579 }
// match: (SHLL x (ANDLconst [c] y))
// cond: c & 31 == 31
// result: (SHLL x y)
20583 for {
20584 x := v_0
20585 if v_1.Op != OpAMD64ANDLconst {
20586 break
20587 }
20588 c := auxIntToInt32(v_1.AuxInt)
20589 y := v_1.Args[0]
20590 if !(c&31 == 31) {
20591 break
20592 }
20593 v.reset(OpAMD64SHLL)
20594 v.AddArg2(x, y)
20595 return true
20596 }
// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
// cond: c & 31 == 31
// result: (SHLL x (NEGL <t> y))
20600 for {
20601 x := v_0
20602 if v_1.Op != OpAMD64NEGL {
20603 break
20604 }
20605 t := v_1.Type
20606 v_1_0 := v_1.Args[0]
20607 if v_1_0.Op != OpAMD64ANDLconst {
20608 break
20609 }
20610 c := auxIntToInt32(v_1_0.AuxInt)
20611 y := v_1_0.Args[0]
20612 if !(c&31 == 31) {
20613 break
20614 }
20615 v.reset(OpAMD64SHLL)
20616 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20617 v0.AddArg(y)
20618 v.AddArg2(x, v0)
20619 return true
20620 }
// match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
// result: (SHLXLload [off] {sym} ptr x mem)
20624 for {
20625 l := v_0
20626 if l.Op != OpAMD64MOVLload {
20627 break
20628 }
20629 off := auxIntToInt32(l.AuxInt)
20630 sym := auxToSym(l.Aux)
20631 mem := l.Args[1]
20632 ptr := l.Args[0]
20633 x := v_1
20634 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
20635 break
20636 }
20637 v.reset(OpAMD64SHLXLload)
20638 v.AuxInt = int32ToAuxInt(off)
20639 v.Aux = symToAux(sym)
20640 v.AddArg3(ptr, x, mem)
20641 return true
20642 }
20643 return false
20644 }
20645 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
20646 v_0 := v.Args[0]
// match: (SHLLconst [1] (SHRLconst [1] x))
// result: (ANDLconst [-2] x)
20649 for {
20650 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
20651 break
20652 }
20653 x := v_0.Args[0]
20654 v.reset(OpAMD64ANDLconst)
20655 v.AuxInt = int32ToAuxInt(-2)
20656 v.AddArg(x)
20657 return true
20658 }
// match: (SHLLconst x [0])
// result: x
20661 for {
20662 if auxIntToInt8(v.AuxInt) != 0 {
20663 break
20664 }
20665 x := v_0
20666 v.copyOf(x)
20667 return true
20668 }
// match: (SHLLconst [d] (MOVLconst [c]))
// result: (MOVLconst [c << uint64(d)])
20671 for {
20672 d := auxIntToInt8(v.AuxInt)
20673 if v_0.Op != OpAMD64MOVLconst {
20674 break
20675 }
20676 c := auxIntToInt32(v_0.AuxInt)
20677 v.reset(OpAMD64MOVLconst)
20678 v.AuxInt = int32ToAuxInt(c << uint64(d))
20679 return true
20680 }
20681 return false
20682 }
20683 func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
20684 v_1 := v.Args[1]
20685 v_0 := v.Args[0]
20686 b := v.Block
// match: (SHLQ x (MOVQconst [c]))
// result: (SHLQconst [int8(c&63)] x)
20689 for {
20690 x := v_0
20691 if v_1.Op != OpAMD64MOVQconst {
20692 break
20693 }
20694 c := auxIntToInt64(v_1.AuxInt)
20695 v.reset(OpAMD64SHLQconst)
20696 v.AuxInt = int8ToAuxInt(int8(c & 63))
20697 v.AddArg(x)
20698 return true
20699 }
// match: (SHLQ x (MOVLconst [c]))
// result: (SHLQconst [int8(c&63)] x)
20702 for {
20703 x := v_0
20704 if v_1.Op != OpAMD64MOVLconst {
20705 break
20706 }
20707 c := auxIntToInt32(v_1.AuxInt)
20708 v.reset(OpAMD64SHLQconst)
20709 v.AuxInt = int8ToAuxInt(int8(c & 63))
20710 v.AddArg(x)
20711 return true
20712 }
// match: (SHLQ x (ADDQconst [c] y))
// cond: c & 63 == 0
// result: (SHLQ x y)
20716 for {
20717 x := v_0
20718 if v_1.Op != OpAMD64ADDQconst {
20719 break
20720 }
20721 c := auxIntToInt32(v_1.AuxInt)
20722 y := v_1.Args[0]
20723 if !(c&63 == 0) {
20724 break
20725 }
20726 v.reset(OpAMD64SHLQ)
20727 v.AddArg2(x, y)
20728 return true
20729 }
// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
// cond: c & 63 == 0
// result: (SHLQ x (NEGQ <t> y))
20733 for {
20734 x := v_0
20735 if v_1.Op != OpAMD64NEGQ {
20736 break
20737 }
20738 t := v_1.Type
20739 v_1_0 := v_1.Args[0]
20740 if v_1_0.Op != OpAMD64ADDQconst {
20741 break
20742 }
20743 c := auxIntToInt32(v_1_0.AuxInt)
20744 y := v_1_0.Args[0]
20745 if !(c&63 == 0) {
20746 break
20747 }
20748 v.reset(OpAMD64SHLQ)
20749 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20750 v0.AddArg(y)
20751 v.AddArg2(x, v0)
20752 return true
20753 }
// match: (SHLQ x (ANDQconst [c] y))
// cond: c & 63 == 63
// result: (SHLQ x y)
20757 for {
20758 x := v_0
20759 if v_1.Op != OpAMD64ANDQconst {
20760 break
20761 }
20762 c := auxIntToInt32(v_1.AuxInt)
20763 y := v_1.Args[0]
20764 if !(c&63 == 63) {
20765 break
20766 }
20767 v.reset(OpAMD64SHLQ)
20768 v.AddArg2(x, y)
20769 return true
20770 }
// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
// cond: c & 63 == 63
// result: (SHLQ x (NEGQ <t> y))
20774 for {
20775 x := v_0
20776 if v_1.Op != OpAMD64NEGQ {
20777 break
20778 }
20779 t := v_1.Type
20780 v_1_0 := v_1.Args[0]
20781 if v_1_0.Op != OpAMD64ANDQconst {
20782 break
20783 }
20784 c := auxIntToInt32(v_1_0.AuxInt)
20785 y := v_1_0.Args[0]
20786 if !(c&63 == 63) {
20787 break
20788 }
20789 v.reset(OpAMD64SHLQ)
20790 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20791 v0.AddArg(y)
20792 v.AddArg2(x, v0)
20793 return true
20794 }
// match: (SHLQ x (ADDLconst [c] y))
// cond: c & 63 == 0
// result: (SHLQ x y)
20798 for {
20799 x := v_0
20800 if v_1.Op != OpAMD64ADDLconst {
20801 break
20802 }
20803 c := auxIntToInt32(v_1.AuxInt)
20804 y := v_1.Args[0]
20805 if !(c&63 == 0) {
20806 break
20807 }
20808 v.reset(OpAMD64SHLQ)
20809 v.AddArg2(x, y)
20810 return true
20811 }
// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
// cond: c & 63 == 0
// result: (SHLQ x (NEGL <t> y))
20815 for {
20816 x := v_0
20817 if v_1.Op != OpAMD64NEGL {
20818 break
20819 }
20820 t := v_1.Type
20821 v_1_0 := v_1.Args[0]
20822 if v_1_0.Op != OpAMD64ADDLconst {
20823 break
20824 }
20825 c := auxIntToInt32(v_1_0.AuxInt)
20826 y := v_1_0.Args[0]
20827 if !(c&63 == 0) {
20828 break
20829 }
20830 v.reset(OpAMD64SHLQ)
20831 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20832 v0.AddArg(y)
20833 v.AddArg2(x, v0)
20834 return true
20835 }
// match: (SHLQ x (ANDLconst [c] y))
// cond: c & 63 == 63
// result: (SHLQ x y)
20839 for {
20840 x := v_0
20841 if v_1.Op != OpAMD64ANDLconst {
20842 break
20843 }
20844 c := auxIntToInt32(v_1.AuxInt)
20845 y := v_1.Args[0]
20846 if !(c&63 == 63) {
20847 break
20848 }
20849 v.reset(OpAMD64SHLQ)
20850 v.AddArg2(x, y)
20851 return true
20852 }
// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
// cond: c & 63 == 63
// result: (SHLQ x (NEGL <t> y))
20856 for {
20857 x := v_0
20858 if v_1.Op != OpAMD64NEGL {
20859 break
20860 }
20861 t := v_1.Type
20862 v_1_0 := v_1.Args[0]
20863 if v_1_0.Op != OpAMD64ANDLconst {
20864 break
20865 }
20866 c := auxIntToInt32(v_1_0.AuxInt)
20867 y := v_1_0.Args[0]
20868 if !(c&63 == 63) {
20869 break
20870 }
20871 v.reset(OpAMD64SHLQ)
20872 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20873 v0.AddArg(y)
20874 v.AddArg2(x, v0)
20875 return true
20876 }
// match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
// result: (SHLXQload [off] {sym} ptr x mem)
20880 for {
20881 l := v_0
20882 if l.Op != OpAMD64MOVQload {
20883 break
20884 }
20885 off := auxIntToInt32(l.AuxInt)
20886 sym := auxToSym(l.Aux)
20887 mem := l.Args[1]
20888 ptr := l.Args[0]
20889 x := v_1
20890 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
20891 break
20892 }
20893 v.reset(OpAMD64SHLXQload)
20894 v.AuxInt = int32ToAuxInt(off)
20895 v.Aux = symToAux(sym)
20896 v.AddArg3(ptr, x, mem)
20897 return true
20898 }
20899 return false
20900 }
20901 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
20902 v_0 := v.Args[0]
// match: (SHLQconst [1] (SHRQconst [1] x))
// result: (ANDQconst [-2] x)
20905 for {
20906 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
20907 break
20908 }
20909 x := v_0.Args[0]
20910 v.reset(OpAMD64ANDQconst)
20911 v.AuxInt = int32ToAuxInt(-2)
20912 v.AddArg(x)
20913 return true
20914 }
// match: (SHLQconst x [0])
// result: x
20917 for {
20918 if auxIntToInt8(v.AuxInt) != 0 {
20919 break
20920 }
20921 x := v_0
20922 v.copyOf(x)
20923 return true
20924 }
// match: (SHLQconst [d] (MOVQconst [c]))
// result: (MOVQconst [c << uint64(d)])
20927 for {
20928 d := auxIntToInt8(v.AuxInt)
20929 if v_0.Op != OpAMD64MOVQconst {
20930 break
20931 }
20932 c := auxIntToInt64(v_0.AuxInt)
20933 v.reset(OpAMD64MOVQconst)
20934 v.AuxInt = int64ToAuxInt(c << uint64(d))
20935 return true
20936 }
// match: (SHLQconst [d] (MOVLconst [c]))
// result: (MOVQconst [int64(c) << uint64(d)])
20939 for {
20940 d := auxIntToInt8(v.AuxInt)
20941 if v_0.Op != OpAMD64MOVLconst {
20942 break
20943 }
20944 c := auxIntToInt32(v_0.AuxInt)
20945 v.reset(OpAMD64MOVQconst)
20946 v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
20947 return true
20948 }
20949 return false
20950 }
20951 func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
20952 v_2 := v.Args[2]
20953 v_1 := v.Args[1]
20954 v_0 := v.Args[0]
20955 b := v.Block
20956 typ := &b.Func.Config.Types
// match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
// result: (SHLLconst [int8(c&31)] (MOVLload <typ.UInt32> [off] {sym} ptr mem))
20959 for {
20960 off := auxIntToInt32(v.AuxInt)
20961 sym := auxToSym(v.Aux)
20962 ptr := v_0
20963 if v_1.Op != OpAMD64MOVLconst {
20964 break
20965 }
20966 c := auxIntToInt32(v_1.AuxInt)
20967 mem := v_2
20968 v.reset(OpAMD64SHLLconst)
20969 v.AuxInt = int8ToAuxInt(int8(c & 31))
20970 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
20971 v0.AuxInt = int32ToAuxInt(off)
20972 v0.Aux = symToAux(sym)
20973 v0.AddArg2(ptr, mem)
20974 v.AddArg(v0)
20975 return true
20976 }
20977 return false
20978 }
20979 func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
20980 v_2 := v.Args[2]
20981 v_1 := v.Args[1]
20982 v_0 := v.Args[0]
20983 b := v.Block
20984 typ := &b.Func.Config.Types
// match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
// result: (SHLQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
20987 for {
20988 off := auxIntToInt32(v.AuxInt)
20989 sym := auxToSym(v.Aux)
20990 ptr := v_0
20991 if v_1.Op != OpAMD64MOVQconst {
20992 break
20993 }
20994 c := auxIntToInt64(v_1.AuxInt)
20995 mem := v_2
20996 v.reset(OpAMD64SHLQconst)
20997 v.AuxInt = int8ToAuxInt(int8(c & 63))
20998 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
20999 v0.AuxInt = int32ToAuxInt(off)
21000 v0.Aux = symToAux(sym)
21001 v0.AddArg2(ptr, mem)
21002 v.AddArg(v0)
21003 return true
21004 }
// match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
// result: (SHLQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
21007 for {
21008 off := auxIntToInt32(v.AuxInt)
21009 sym := auxToSym(v.Aux)
21010 ptr := v_0
21011 if v_1.Op != OpAMD64MOVLconst {
21012 break
21013 }
21014 c := auxIntToInt32(v_1.AuxInt)
21015 mem := v_2
21016 v.reset(OpAMD64SHLQconst)
21017 v.AuxInt = int8ToAuxInt(int8(c & 63))
21018 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21019 v0.AuxInt = int32ToAuxInt(off)
21020 v0.Aux = symToAux(sym)
21021 v0.AddArg2(ptr, mem)
21022 v.AddArg(v0)
21023 return true
21024 }
21025 return false
21026 }
21027 func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
21028 v_1 := v.Args[1]
21029 v_0 := v.Args[0]
// match: (SHRB x (MOVQconst [c]))
// cond: c&31 < 8
// result: (SHRBconst [int8(c&31)] x)
21033 for {
21034 x := v_0
21035 if v_1.Op != OpAMD64MOVQconst {
21036 break
21037 }
21038 c := auxIntToInt64(v_1.AuxInt)
21039 if !(c&31 < 8) {
21040 break
21041 }
21042 v.reset(OpAMD64SHRBconst)
21043 v.AuxInt = int8ToAuxInt(int8(c & 31))
21044 v.AddArg(x)
21045 return true
21046 }
// match: (SHRB x (MOVLconst [c]))
// cond: c&31 < 8
// result: (SHRBconst [int8(c&31)] x)
21050 for {
21051 x := v_0
21052 if v_1.Op != OpAMD64MOVLconst {
21053 break
21054 }
21055 c := auxIntToInt32(v_1.AuxInt)
21056 if !(c&31 < 8) {
21057 break
21058 }
21059 v.reset(OpAMD64SHRBconst)
21060 v.AuxInt = int8ToAuxInt(int8(c & 31))
21061 v.AddArg(x)
21062 return true
21063 }
// match: (SHRB _ (MOVQconst [c]))
// cond: c&31 >= 8
// result: (MOVLconst [0])
21067 for {
21068 if v_1.Op != OpAMD64MOVQconst {
21069 break
21070 }
21071 c := auxIntToInt64(v_1.AuxInt)
21072 if !(c&31 >= 8) {
21073 break
21074 }
21075 v.reset(OpAMD64MOVLconst)
21076 v.AuxInt = int32ToAuxInt(0)
21077 return true
21078 }
// match: (SHRB _ (MOVLconst [c]))
// cond: c&31 >= 8
// result: (MOVLconst [0])
21082 for {
21083 if v_1.Op != OpAMD64MOVLconst {
21084 break
21085 }
21086 c := auxIntToInt32(v_1.AuxInt)
21087 if !(c&31 >= 8) {
21088 break
21089 }
21090 v.reset(OpAMD64MOVLconst)
21091 v.AuxInt = int32ToAuxInt(0)
21092 return true
21093 }
21094 return false
21095 }
21096 func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
21097 v_0 := v.Args[0]
// match: (SHRBconst x [0])
// result: x
21100 for {
21101 if auxIntToInt8(v.AuxInt) != 0 {
21102 break
21103 }
21104 x := v_0
21105 v.copyOf(x)
21106 return true
21107 }
21108 return false
21109 }
21110 func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
21111 v_1 := v.Args[1]
21112 v_0 := v.Args[0]
21113 b := v.Block
// match: (SHRL x (MOVQconst [c]))
// result: (SHRLconst [int8(c&31)] x)
21116 for {
21117 x := v_0
21118 if v_1.Op != OpAMD64MOVQconst {
21119 break
21120 }
21121 c := auxIntToInt64(v_1.AuxInt)
21122 v.reset(OpAMD64SHRLconst)
21123 v.AuxInt = int8ToAuxInt(int8(c & 31))
21124 v.AddArg(x)
21125 return true
21126 }
// match: (SHRL x (MOVLconst [c]))
// result: (SHRLconst [int8(c&31)] x)
21129 for {
21130 x := v_0
21131 if v_1.Op != OpAMD64MOVLconst {
21132 break
21133 }
21134 c := auxIntToInt32(v_1.AuxInt)
21135 v.reset(OpAMD64SHRLconst)
21136 v.AuxInt = int8ToAuxInt(int8(c & 31))
21137 v.AddArg(x)
21138 return true
21139 }
// match: (SHRL x (ADDQconst [c] y))
// cond: c & 31 == 0
// result: (SHRL x y)
21143 for {
21144 x := v_0
21145 if v_1.Op != OpAMD64ADDQconst {
21146 break
21147 }
21148 c := auxIntToInt32(v_1.AuxInt)
21149 y := v_1.Args[0]
21150 if !(c&31 == 0) {
21151 break
21152 }
21153 v.reset(OpAMD64SHRL)
21154 v.AddArg2(x, y)
21155 return true
21156 }
// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
// cond: c & 31 == 0
// result: (SHRL x (NEGQ <t> y))
21160 for {
21161 x := v_0
21162 if v_1.Op != OpAMD64NEGQ {
21163 break
21164 }
21165 t := v_1.Type
21166 v_1_0 := v_1.Args[0]
21167 if v_1_0.Op != OpAMD64ADDQconst {
21168 break
21169 }
21170 c := auxIntToInt32(v_1_0.AuxInt)
21171 y := v_1_0.Args[0]
21172 if !(c&31 == 0) {
21173 break
21174 }
21175 v.reset(OpAMD64SHRL)
21176 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21177 v0.AddArg(y)
21178 v.AddArg2(x, v0)
21179 return true
21180 }
// match: (SHRL x (ANDQconst [c] y))
// cond: c & 31 == 31
// result: (SHRL x y)
21184 for {
21185 x := v_0
21186 if v_1.Op != OpAMD64ANDQconst {
21187 break
21188 }
21189 c := auxIntToInt32(v_1.AuxInt)
21190 y := v_1.Args[0]
21191 if !(c&31 == 31) {
21192 break
21193 }
21194 v.reset(OpAMD64SHRL)
21195 v.AddArg2(x, y)
21196 return true
21197 }
// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
// cond: c & 31 == 31
// result: (SHRL x (NEGQ <t> y))
21201 for {
21202 x := v_0
21203 if v_1.Op != OpAMD64NEGQ {
21204 break
21205 }
21206 t := v_1.Type
21207 v_1_0 := v_1.Args[0]
21208 if v_1_0.Op != OpAMD64ANDQconst {
21209 break
21210 }
21211 c := auxIntToInt32(v_1_0.AuxInt)
21212 y := v_1_0.Args[0]
21213 if !(c&31 == 31) {
21214 break
21215 }
21216 v.reset(OpAMD64SHRL)
21217 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21218 v0.AddArg(y)
21219 v.AddArg2(x, v0)
21220 return true
21221 }
// match: (SHRL x (ADDLconst [c] y))
// cond: c & 31 == 0
// result: (SHRL x y)
21225 for {
21226 x := v_0
21227 if v_1.Op != OpAMD64ADDLconst {
21228 break
21229 }
21230 c := auxIntToInt32(v_1.AuxInt)
21231 y := v_1.Args[0]
21232 if !(c&31 == 0) {
21233 break
21234 }
21235 v.reset(OpAMD64SHRL)
21236 v.AddArg2(x, y)
21237 return true
21238 }
// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
// cond: c & 31 == 0
// result: (SHRL x (NEGL <t> y))
21242 for {
21243 x := v_0
21244 if v_1.Op != OpAMD64NEGL {
21245 break
21246 }
21247 t := v_1.Type
21248 v_1_0 := v_1.Args[0]
21249 if v_1_0.Op != OpAMD64ADDLconst {
21250 break
21251 }
21252 c := auxIntToInt32(v_1_0.AuxInt)
21253 y := v_1_0.Args[0]
21254 if !(c&31 == 0) {
21255 break
21256 }
21257 v.reset(OpAMD64SHRL)
21258 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21259 v0.AddArg(y)
21260 v.AddArg2(x, v0)
21261 return true
21262 }
// match: (SHRL x (ANDLconst [c] y))
// cond: c & 31 == 31
// result: (SHRL x y)
21266 for {
21267 x := v_0
21268 if v_1.Op != OpAMD64ANDLconst {
21269 break
21270 }
21271 c := auxIntToInt32(v_1.AuxInt)
21272 y := v_1.Args[0]
21273 if !(c&31 == 31) {
21274 break
21275 }
21276 v.reset(OpAMD64SHRL)
21277 v.AddArg2(x, y)
21278 return true
21279 }
// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
// cond: c & 31 == 31
// result: (SHRL x (NEGL <t> y))
21283 for {
21284 x := v_0
21285 if v_1.Op != OpAMD64NEGL {
21286 break
21287 }
21288 t := v_1.Type
21289 v_1_0 := v_1.Args[0]
21290 if v_1_0.Op != OpAMD64ANDLconst {
21291 break
21292 }
21293 c := auxIntToInt32(v_1_0.AuxInt)
21294 y := v_1_0.Args[0]
21295 if !(c&31 == 31) {
21296 break
21297 }
21298 v.reset(OpAMD64SHRL)
21299 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21300 v0.AddArg(y)
21301 v.AddArg2(x, v0)
21302 return true
21303 }
// match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
// result: (SHRXLload [off] {sym} ptr x mem)
21307 for {
21308 l := v_0
21309 if l.Op != OpAMD64MOVLload {
21310 break
21311 }
21312 off := auxIntToInt32(l.AuxInt)
21313 sym := auxToSym(l.Aux)
21314 mem := l.Args[1]
21315 ptr := l.Args[0]
21316 x := v_1
21317 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
21318 break
21319 }
21320 v.reset(OpAMD64SHRXLload)
21321 v.AuxInt = int32ToAuxInt(off)
21322 v.Aux = symToAux(sym)
21323 v.AddArg3(ptr, x, mem)
21324 return true
21325 }
21326 return false
21327 }
21328 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
21329 v_0 := v.Args[0]
// match: (SHRLconst [1] (SHLLconst [1] x))
// result: (ANDLconst [0x7fffffff] x)
21332 for {
21333 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
21334 break
21335 }
21336 x := v_0.Args[0]
21337 v.reset(OpAMD64ANDLconst)
21338 v.AuxInt = int32ToAuxInt(0x7fffffff)
21339 v.AddArg(x)
21340 return true
21341 }
// match: (SHRLconst x [0])
// result: x
21344 for {
21345 if auxIntToInt8(v.AuxInt) != 0 {
21346 break
21347 }
21348 x := v_0
21349 v.copyOf(x)
21350 return true
21351 }
21352 return false
21353 }
21354 func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
21355 v_1 := v.Args[1]
21356 v_0 := v.Args[0]
21357 b := v.Block
// match: (SHRQ x (MOVQconst [c]))
// result: (SHRQconst [int8(c&63)] x)
21360 for {
21361 x := v_0
21362 if v_1.Op != OpAMD64MOVQconst {
21363 break
21364 }
21365 c := auxIntToInt64(v_1.AuxInt)
21366 v.reset(OpAMD64SHRQconst)
21367 v.AuxInt = int8ToAuxInt(int8(c & 63))
21368 v.AddArg(x)
21369 return true
21370 }
// match: (SHRQ x (MOVLconst [c]))
// result: (SHRQconst [int8(c&63)] x)
21373 for {
21374 x := v_0
21375 if v_1.Op != OpAMD64MOVLconst {
21376 break
21377 }
21378 c := auxIntToInt32(v_1.AuxInt)
21379 v.reset(OpAMD64SHRQconst)
21380 v.AuxInt = int8ToAuxInt(int8(c & 63))
21381 v.AddArg(x)
21382 return true
21383 }
// match: (SHRQ x (ADDQconst [c] y))
// cond: c & 63 == 0
// result: (SHRQ x y)
21387 for {
21388 x := v_0
21389 if v_1.Op != OpAMD64ADDQconst {
21390 break
21391 }
21392 c := auxIntToInt32(v_1.AuxInt)
21393 y := v_1.Args[0]
21394 if !(c&63 == 0) {
21395 break
21396 }
21397 v.reset(OpAMD64SHRQ)
21398 v.AddArg2(x, y)
21399 return true
21400 }
// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
// cond: c & 63 == 0
// result: (SHRQ x (NEGQ <t> y))
21404 for {
21405 x := v_0
21406 if v_1.Op != OpAMD64NEGQ {
21407 break
21408 }
21409 t := v_1.Type
21410 v_1_0 := v_1.Args[0]
21411 if v_1_0.Op != OpAMD64ADDQconst {
21412 break
21413 }
21414 c := auxIntToInt32(v_1_0.AuxInt)
21415 y := v_1_0.Args[0]
21416 if !(c&63 == 0) {
21417 break
21418 }
21419 v.reset(OpAMD64SHRQ)
21420 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21421 v0.AddArg(y)
21422 v.AddArg2(x, v0)
21423 return true
21424 }
// match: (SHRQ x (ANDQconst [c] y))
// cond: c & 63 == 63
// result: (SHRQ x y)
21428 for {
21429 x := v_0
21430 if v_1.Op != OpAMD64ANDQconst {
21431 break
21432 }
21433 c := auxIntToInt32(v_1.AuxInt)
21434 y := v_1.Args[0]
21435 if !(c&63 == 63) {
21436 break
21437 }
21438 v.reset(OpAMD64SHRQ)
21439 v.AddArg2(x, y)
21440 return true
21441 }
// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
// cond: c & 63 == 63
// result: (SHRQ x (NEGQ <t> y))
21445 for {
21446 x := v_0
21447 if v_1.Op != OpAMD64NEGQ {
21448 break
21449 }
21450 t := v_1.Type
21451 v_1_0 := v_1.Args[0]
21452 if v_1_0.Op != OpAMD64ANDQconst {
21453 break
21454 }
21455 c := auxIntToInt32(v_1_0.AuxInt)
21456 y := v_1_0.Args[0]
21457 if !(c&63 == 63) {
21458 break
21459 }
21460 v.reset(OpAMD64SHRQ)
21461 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21462 v0.AddArg(y)
21463 v.AddArg2(x, v0)
21464 return true
21465 }
// match: (SHRQ x (ADDLconst [c] y))
// cond: c & 63 == 0
// result: (SHRQ x y)
21469 for {
21470 x := v_0
21471 if v_1.Op != OpAMD64ADDLconst {
21472 break
21473 }
21474 c := auxIntToInt32(v_1.AuxInt)
21475 y := v_1.Args[0]
21476 if !(c&63 == 0) {
21477 break
21478 }
21479 v.reset(OpAMD64SHRQ)
21480 v.AddArg2(x, y)
21481 return true
21482 }
// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
// cond: c & 63 == 0
// result: (SHRQ x (NEGL <t> y))
21486 for {
21487 x := v_0
21488 if v_1.Op != OpAMD64NEGL {
21489 break
21490 }
21491 t := v_1.Type
21492 v_1_0 := v_1.Args[0]
21493 if v_1_0.Op != OpAMD64ADDLconst {
21494 break
21495 }
21496 c := auxIntToInt32(v_1_0.AuxInt)
21497 y := v_1_0.Args[0]
21498 if !(c&63 == 0) {
21499 break
21500 }
21501 v.reset(OpAMD64SHRQ)
21502 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21503 v0.AddArg(y)
21504 v.AddArg2(x, v0)
21505 return true
21506 }
// match: (SHRQ x (ANDLconst [c] y))
// cond: c & 63 == 63
// result: (SHRQ x y)
21510 for {
21511 x := v_0
21512 if v_1.Op != OpAMD64ANDLconst {
21513 break
21514 }
21515 c := auxIntToInt32(v_1.AuxInt)
21516 y := v_1.Args[0]
21517 if !(c&63 == 63) {
21518 break
21519 }
21520 v.reset(OpAMD64SHRQ)
21521 v.AddArg2(x, y)
21522 return true
21523 }
// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
// cond: c & 63 == 63
// result: (SHRQ x (NEGL <t> y))
21527 for {
21528 x := v_0
21529 if v_1.Op != OpAMD64NEGL {
21530 break
21531 }
21532 t := v_1.Type
21533 v_1_0 := v_1.Args[0]
21534 if v_1_0.Op != OpAMD64ANDLconst {
21535 break
21536 }
21537 c := auxIntToInt32(v_1_0.AuxInt)
21538 y := v_1_0.Args[0]
21539 if !(c&63 == 63) {
21540 break
21541 }
21542 v.reset(OpAMD64SHRQ)
21543 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21544 v0.AddArg(y)
21545 v.AddArg2(x, v0)
21546 return true
21547 }
// match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
// result: (SHRXQload [off] {sym} ptr x mem)
21551 for {
21552 l := v_0
21553 if l.Op != OpAMD64MOVQload {
21554 break
21555 }
21556 off := auxIntToInt32(l.AuxInt)
21557 sym := auxToSym(l.Aux)
21558 mem := l.Args[1]
21559 ptr := l.Args[0]
21560 x := v_1
21561 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
21562 break
21563 }
21564 v.reset(OpAMD64SHRXQload)
21565 v.AuxInt = int32ToAuxInt(off)
21566 v.Aux = symToAux(sym)
21567 v.AddArg3(ptr, x, mem)
21568 return true
21569 }
21570 return false
21571 }
21572 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
21573 v_0 := v.Args[0]
// match: (SHRQconst [1] (SHLQconst [1] x))
// result: (BTRQconst [63] x)
21576 for {
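// Shifting left then logically right by one simply clears the top bit;
// BTRQconst [63] does the same in a single instruction.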
21577 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
21578 break
21579 }
21580 x := v_0.Args[0]
21581 v.reset(OpAMD64BTRQconst)
21582 v.AuxInt = int8ToAuxInt(63)
21583 v.AddArg(x)
21584 return true
21585 }
// match: (SHRQconst x [0])
// result: x
21588 for {
21589 if auxIntToInt8(v.AuxInt) != 0 {
21590 break
21591 }
21592 x := v_0
21593 v.copyOf(x)
21594 return true
21595 }
21596 return false
21597 }
21598 func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
21599 v_1 := v.Args[1]
21600 v_0 := v.Args[0]
// match: (SHRW x (MOVQconst [c]))
// cond: c&31 < 16
// result: (SHRWconst [int8(c&31)] x)
21604 for {
21605 x := v_0
21606 if v_1.Op != OpAMD64MOVQconst {
21607 break
21608 }
21609 c := auxIntToInt64(v_1.AuxInt)
21610 if !(c&31 < 16) {
21611 break
21612 }
21613 v.reset(OpAMD64SHRWconst)
21614 v.AuxInt = int8ToAuxInt(int8(c & 31))
21615 v.AddArg(x)
21616 return true
21617 }
// match: (SHRW x (MOVLconst [c]))
// cond: c&31 < 16
// result: (SHRWconst [int8(c&31)] x)
21621 for {
21622 x := v_0
21623 if v_1.Op != OpAMD64MOVLconst {
21624 break
21625 }
21626 c := auxIntToInt32(v_1.AuxInt)
21627 if !(c&31 < 16) {
21628 break
21629 }
21630 v.reset(OpAMD64SHRWconst)
21631 v.AuxInt = int8ToAuxInt(int8(c & 31))
21632 v.AddArg(x)
21633 return true
21634 }
// match: (SHRW _ (MOVQconst [c]))
// cond: c&31 >= 16
// result: (MOVLconst [0])
21638 for {
21639 if v_1.Op != OpAMD64MOVQconst {
21640 break
21641 }
21642 c := auxIntToInt64(v_1.AuxInt)
21643 if !(c&31 >= 16) {
21644 break
21645 }
21646 v.reset(OpAMD64MOVLconst)
21647 v.AuxInt = int32ToAuxInt(0)
21648 return true
21649 }
// match: (SHRW _ (MOVLconst [c]))
// cond: c&31 >= 16
// result: (MOVLconst [0])
21653 for {
21654 if v_1.Op != OpAMD64MOVLconst {
21655 break
21656 }
21657 c := auxIntToInt32(v_1.AuxInt)
21658 if !(c&31 >= 16) {
21659 break
21660 }
21661 v.reset(OpAMD64MOVLconst)
21662 v.AuxInt = int32ToAuxInt(0)
21663 return true
21664 }
21665 return false
21666 }
21667 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
21668 v_0 := v.Args[0]
// match: (SHRWconst x [0])
// result: x
21671 for {
21672 if auxIntToInt8(v.AuxInt) != 0 {
21673 break
21674 }
21675 x := v_0
21676 v.copyOf(x)
21677 return true
21678 }
21679 return false
21680 }
21681 func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
21682 v_2 := v.Args[2]
21683 v_1 := v.Args[1]
21684 v_0 := v.Args[0]
21685 b := v.Block
21686 typ := &b.Func.Config.Types
// match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
// result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
21689 for {
21690 off := auxIntToInt32(v.AuxInt)
21691 sym := auxToSym(v.Aux)
21692 ptr := v_0
21693 if v_1.Op != OpAMD64MOVLconst {
21694 break
21695 }
21696 c := auxIntToInt32(v_1.AuxInt)
21697 mem := v_2
21698 v.reset(OpAMD64SHRLconst)
21699 v.AuxInt = int8ToAuxInt(int8(c & 31))
21700 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
21701 v0.AuxInt = int32ToAuxInt(off)
21702 v0.Aux = symToAux(sym)
21703 v0.AddArg2(ptr, mem)
21704 v.AddArg(v0)
21705 return true
21706 }
21707 return false
21708 }
21709 func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
21710 v_2 := v.Args[2]
21711 v_1 := v.Args[1]
21712 v_0 := v.Args[0]
21713 b := v.Block
21714 typ := &b.Func.Config.Types
// match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
21717 for {
21718 off := auxIntToInt32(v.AuxInt)
21719 sym := auxToSym(v.Aux)
21720 ptr := v_0
21721 if v_1.Op != OpAMD64MOVQconst {
21722 break
21723 }
21724 c := auxIntToInt64(v_1.AuxInt)
21725 mem := v_2
21726 v.reset(OpAMD64SHRQconst)
21727 v.AuxInt = int8ToAuxInt(int8(c & 63))
21728 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21729 v0.AuxInt = int32ToAuxInt(off)
21730 v0.Aux = symToAux(sym)
21731 v0.AddArg2(ptr, mem)
21732 v.AddArg(v0)
21733 return true
21734 }
// match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
21737 for {
21738 off := auxIntToInt32(v.AuxInt)
21739 sym := auxToSym(v.Aux)
21740 ptr := v_0
21741 if v_1.Op != OpAMD64MOVLconst {
21742 break
21743 }
21744 c := auxIntToInt32(v_1.AuxInt)
21745 mem := v_2
21746 v.reset(OpAMD64SHRQconst)
21747 v.AuxInt = int8ToAuxInt(int8(c & 63))
21748 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21749 v0.AuxInt = int32ToAuxInt(off)
21750 v0.Aux = symToAux(sym)
21751 v0.AddArg2(ptr, mem)
21752 v.AddArg(v0)
21753 return true
21754 }
21755 return false
21756 }
21757 func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
21758 v_1 := v.Args[1]
21759 v_0 := v.Args[0]
21760 b := v.Block
// match: (SUBL x (MOVLconst [c]))
// result: (SUBLconst x [c])
21763 for {
21764 x := v_0
21765 if v_1.Op != OpAMD64MOVLconst {
21766 break
21767 }
21768 c := auxIntToInt32(v_1.AuxInt)
21769 v.reset(OpAMD64SUBLconst)
21770 v.AuxInt = int32ToAuxInt(c)
21771 v.AddArg(x)
21772 return true
21773 }
// match: (SUBL (MOVLconst [c]) x)
// result: (NEGL (SUBLconst <v.Type> x [c]))
21776 for {
21777 if v_0.Op != OpAMD64MOVLconst {
21778 break
21779 }
21780 c := auxIntToInt32(v_0.AuxInt)
21781 x := v_1
21782 v.reset(OpAMD64NEGL)
21783 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
21784 v0.AuxInt = int32ToAuxInt(c)
21785 v0.AddArg(x)
21786 v.AddArg(v0)
21787 return true
21788 }
// match: (SUBL x x)
// result: (MOVLconst [0])
21791 for {
21792 x := v_0
21793 if x != v_1 {
21794 break
21795 }
21796 v.reset(OpAMD64MOVLconst)
21797 v.AuxInt = int32ToAuxInt(0)
21798 return true
21799 }
// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (SUBLload x [off] {sym} ptr mem)
21803 for {
21804 x := v_0
21805 l := v_1
21806 if l.Op != OpAMD64MOVLload {
21807 break
21808 }
21809 off := auxIntToInt32(l.AuxInt)
21810 sym := auxToSym(l.Aux)
21811 mem := l.Args[1]
21812 ptr := l.Args[0]
21813 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
21814 break
21815 }
21816 v.reset(OpAMD64SUBLload)
21817 v.AuxInt = int32ToAuxInt(off)
21818 v.Aux = symToAux(sym)
21819 v.AddArg3(x, ptr, mem)
21820 return true
21821 }
21822 return false
21823 }
21824 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
21825 v_0 := v.Args[0]
// match: (SUBLconst [c] x)
// cond: c==0
// result: x
21829 for {
21830 c := auxIntToInt32(v.AuxInt)
21831 x := v_0
21832 if !(c == 0) {
21833 break
21834 }
21835 v.copyOf(x)
21836 return true
21837 }
// match: (SUBLconst [c] x)
// result: (ADDLconst [-c] x)
21840 for {
21841 c := auxIntToInt32(v.AuxInt)
21842 x := v_0
21843 v.reset(OpAMD64ADDLconst)
21844 v.AuxInt = int32ToAuxInt(-c)
21845 v.AddArg(x)
21846 return true
21847 }
21848 }
21849 func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
21850 v_2 := v.Args[2]
21851 v_1 := v.Args[1]
21852 v_0 := v.Args[0]
21853 b := v.Block
21854 typ := &b.Func.Config.Types
// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBLload [off1+off2] {sym} val base mem)
21858 for {
21859 off1 := auxIntToInt32(v.AuxInt)
21860 sym := auxToSym(v.Aux)
21861 val := v_0
21862 if v_1.Op != OpAMD64ADDQconst {
21863 break
21864 }
21865 off2 := auxIntToInt32(v_1.AuxInt)
21866 base := v_1.Args[0]
21867 mem := v_2
21868 if !(is32Bit(int64(off1) + int64(off2))) {
21869 break
21870 }
21871 v.reset(OpAMD64SUBLload)
21872 v.AuxInt = int32ToAuxInt(off1 + off2)
21873 v.Aux = symToAux(sym)
21874 v.AddArg3(val, base, mem)
21875 return true
21876 }
// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
21880 for {
21881 off1 := auxIntToInt32(v.AuxInt)
21882 sym1 := auxToSym(v.Aux)
21883 val := v_0
21884 if v_1.Op != OpAMD64LEAQ {
21885 break
21886 }
21887 off2 := auxIntToInt32(v_1.AuxInt)
21888 sym2 := auxToSym(v_1.Aux)
21889 base := v_1.Args[0]
21890 mem := v_2
21891 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21892 break
21893 }
21894 v.reset(OpAMD64SUBLload)
21895 v.AuxInt = int32ToAuxInt(off1 + off2)
21896 v.Aux = symToAux(mergeSym(sym1, sym2))
21897 v.AddArg3(val, base, mem)
21898 return true
21899 }
// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (SUBL x (MOVLf2i y))
21902 for {
21903 off := auxIntToInt32(v.AuxInt)
21904 sym := auxToSym(v.Aux)
21905 x := v_0
21906 ptr := v_1
21907 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
21908 break
21909 }
21910 y := v_2.Args[1]
21911 if ptr != v_2.Args[0] {
21912 break
21913 }
21914 v.reset(OpAMD64SUBL)
21915 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
21916 v0.AddArg(y)
21917 v.AddArg2(x, v0)
21918 return true
21919 }
21920 return false
21921 }
21922 func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
21923 v_2 := v.Args[2]
21924 v_1 := v.Args[1]
21925 v_0 := v.Args[0]
// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBLmodify [off1+off2] {sym} base val mem)
21929 for {
21930 off1 := auxIntToInt32(v.AuxInt)
21931 sym := auxToSym(v.Aux)
21932 if v_0.Op != OpAMD64ADDQconst {
21933 break
21934 }
21935 off2 := auxIntToInt32(v_0.AuxInt)
21936 base := v_0.Args[0]
21937 val := v_1
21938 mem := v_2
21939 if !(is32Bit(int64(off1) + int64(off2))) {
21940 break
21941 }
21942 v.reset(OpAMD64SUBLmodify)
21943 v.AuxInt = int32ToAuxInt(off1 + off2)
21944 v.Aux = symToAux(sym)
21945 v.AddArg3(base, val, mem)
21946 return true
21947 }
// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21951 for {
21952 off1 := auxIntToInt32(v.AuxInt)
21953 sym1 := auxToSym(v.Aux)
21954 if v_0.Op != OpAMD64LEAQ {
21955 break
21956 }
21957 off2 := auxIntToInt32(v_0.AuxInt)
21958 sym2 := auxToSym(v_0.Aux)
21959 base := v_0.Args[0]
21960 val := v_1
21961 mem := v_2
21962 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21963 break
21964 }
21965 v.reset(OpAMD64SUBLmodify)
21966 v.AuxInt = int32ToAuxInt(off1 + off2)
21967 v.Aux = symToAux(mergeSym(sym1, sym2))
21968 v.AddArg3(base, val, mem)
21969 return true
21970 }
21971 return false
21972 }
21973 func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
21974 v_1 := v.Args[1]
21975 v_0 := v.Args[0]
21976 b := v.Block
// match: (SUBQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (SUBQconst x [int32(c)])
21980 for {
21981 x := v_0
21982 if v_1.Op != OpAMD64MOVQconst {
21983 break
21984 }
21985 c := auxIntToInt64(v_1.AuxInt)
21986 if !(is32Bit(c)) {
21987 break
21988 }
21989 v.reset(OpAMD64SUBQconst)
21990 v.AuxInt = int32ToAuxInt(int32(c))
21991 v.AddArg(x)
21992 return true
21993 }
// match: (SUBQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
21997 for {
21998 if v_0.Op != OpAMD64MOVQconst {
21999 break
22000 }
22001 c := auxIntToInt64(v_0.AuxInt)
22002 x := v_1
22003 if !(is32Bit(c)) {
22004 break
22005 }
22006 v.reset(OpAMD64NEGQ)
22007 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
22008 v0.AuxInt = int32ToAuxInt(int32(c))
22009 v0.AddArg(x)
22010 v.AddArg(v0)
22011 return true
22012 }
// match: (SUBQ x x)
// result: (MOVQconst [0])
22015 for {
22016 x := v_0
22017 if x != v_1 {
22018 break
22019 }
22020 v.reset(OpAMD64MOVQconst)
22021 v.AuxInt = int64ToAuxInt(0)
22022 return true
22023 }
// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (SUBQload x [off] {sym} ptr mem)
22027 for {
22028 x := v_0
22029 l := v_1
22030 if l.Op != OpAMD64MOVQload {
22031 break
22032 }
22033 off := auxIntToInt32(l.AuxInt)
22034 sym := auxToSym(l.Aux)
22035 mem := l.Args[1]
22036 ptr := l.Args[0]
22037 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22038 break
22039 }
22040 v.reset(OpAMD64SUBQload)
22041 v.AuxInt = int32ToAuxInt(off)
22042 v.Aux = symToAux(sym)
22043 v.AddArg3(x, ptr, mem)
22044 return true
22045 }
22046 return false
22047 }
22048 func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
22049 v_1 := v.Args[1]
22050 v_0 := v.Args[0]
// match: (SUBQborrow x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (SUBQconstborrow x [int32(c)])
22054 for {
22055 x := v_0
22056 if v_1.Op != OpAMD64MOVQconst {
22057 break
22058 }
22059 c := auxIntToInt64(v_1.AuxInt)
22060 if !(is32Bit(c)) {
22061 break
22062 }
22063 v.reset(OpAMD64SUBQconstborrow)
22064 v.AuxInt = int32ToAuxInt(int32(c))
22065 v.AddArg(x)
22066 return true
22067 }
22068 return false
22069 }
22070 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
22071 v_0 := v.Args[0]
// match: (SUBQconst [0] x)
// result: x
22074 for {
22075 if auxIntToInt32(v.AuxInt) != 0 {
22076 break
22077 }
22078 x := v_0
22079 v.copyOf(x)
22080 return true
22081 }
// match: (SUBQconst [c] x)
// cond: c != -(1<<31)
// result: (ADDQconst [-c] x)
22085 for {
22086 c := auxIntToInt32(v.AuxInt)
22087 x := v_0
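// -c would overflow int32 when c == -1<<31, so that one value is excluded
// by the condition below.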
22088 if !(c != -(1 << 31)) {
22089 break
22090 }
22091 v.reset(OpAMD64ADDQconst)
22092 v.AuxInt = int32ToAuxInt(-c)
22093 v.AddArg(x)
22094 return true
22095 }
// match: (SUBQconst (MOVQconst [d]) [c])
// result: (MOVQconst [d-int64(c)])
22098 for {
22099 c := auxIntToInt32(v.AuxInt)
22100 if v_0.Op != OpAMD64MOVQconst {
22101 break
22102 }
22103 d := auxIntToInt64(v_0.AuxInt)
22104 v.reset(OpAMD64MOVQconst)
22105 v.AuxInt = int64ToAuxInt(d - int64(c))
22106 return true
22107 }
// match: (SUBQconst (SUBQconst x [d]) [c])
// cond: is32Bit(int64(-c)-int64(d))
// result: (ADDQconst [-c-d] x)
22111 for {
22112 c := auxIntToInt32(v.AuxInt)
22113 if v_0.Op != OpAMD64SUBQconst {
22114 break
22115 }
22116 d := auxIntToInt32(v_0.AuxInt)
22117 x := v_0.Args[0]
22118 if !(is32Bit(int64(-c) - int64(d))) {
22119 break
22120 }
22121 v.reset(OpAMD64ADDQconst)
22122 v.AuxInt = int32ToAuxInt(-c - d)
22123 v.AddArg(x)
22124 return true
22125 }
22126 return false
22127 }
22128 func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
22129 v_2 := v.Args[2]
22130 v_1 := v.Args[1]
22131 v_0 := v.Args[0]
22132 b := v.Block
22133 typ := &b.Func.Config.Types
// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBQload [off1+off2] {sym} val base mem)
22137 for {
22138 off1 := auxIntToInt32(v.AuxInt)
22139 sym := auxToSym(v.Aux)
22140 val := v_0
22141 if v_1.Op != OpAMD64ADDQconst {
22142 break
22143 }
22144 off2 := auxIntToInt32(v_1.AuxInt)
22145 base := v_1.Args[0]
22146 mem := v_2
22147 if !(is32Bit(int64(off1) + int64(off2))) {
22148 break
22149 }
22150 v.reset(OpAMD64SUBQload)
22151 v.AuxInt = int32ToAuxInt(off1 + off2)
22152 v.Aux = symToAux(sym)
22153 v.AddArg3(val, base, mem)
22154 return true
22155 }
// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
22159 for {
22160 off1 := auxIntToInt32(v.AuxInt)
22161 sym1 := auxToSym(v.Aux)
22162 val := v_0
22163 if v_1.Op != OpAMD64LEAQ {
22164 break
22165 }
22166 off2 := auxIntToInt32(v_1.AuxInt)
22167 sym2 := auxToSym(v_1.Aux)
22168 base := v_1.Args[0]
22169 mem := v_2
22170 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22171 break
22172 }
22173 v.reset(OpAMD64SUBQload)
22174 v.AuxInt = int32ToAuxInt(off1 + off2)
22175 v.Aux = symToAux(mergeSym(sym1, sym2))
22176 v.AddArg3(val, base, mem)
22177 return true
22178 }
// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (SUBQ x (MOVQf2i y))
22181 for {
22182 off := auxIntToInt32(v.AuxInt)
22183 sym := auxToSym(v.Aux)
22184 x := v_0
22185 ptr := v_1
22186 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22187 break
22188 }
22189 y := v_2.Args[1]
22190 if ptr != v_2.Args[0] {
22191 break
22192 }
22193 v.reset(OpAMD64SUBQ)
22194 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
22195 v0.AddArg(y)
22196 v.AddArg2(x, v0)
22197 return true
22198 }
22199 return false
22200 }
22201 func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
22202 v_2 := v.Args[2]
22203 v_1 := v.Args[1]
22204 v_0 := v.Args[0]
// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBQmodify [off1+off2] {sym} base val mem)
22208 for {
22209 off1 := auxIntToInt32(v.AuxInt)
22210 sym := auxToSym(v.Aux)
22211 if v_0.Op != OpAMD64ADDQconst {
22212 break
22213 }
22214 off2 := auxIntToInt32(v_0.AuxInt)
22215 base := v_0.Args[0]
22216 val := v_1
22217 mem := v_2
22218 if !(is32Bit(int64(off1) + int64(off2))) {
22219 break
22220 }
22221 v.reset(OpAMD64SUBQmodify)
22222 v.AuxInt = int32ToAuxInt(off1 + off2)
22223 v.Aux = symToAux(sym)
22224 v.AddArg3(base, val, mem)
22225 return true
22226 }
// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
22230 for {
22231 off1 := auxIntToInt32(v.AuxInt)
22232 sym1 := auxToSym(v.Aux)
22233 if v_0.Op != OpAMD64LEAQ {
22234 break
22235 }
22236 off2 := auxIntToInt32(v_0.AuxInt)
22237 sym2 := auxToSym(v_0.Aux)
22238 base := v_0.Args[0]
22239 val := v_1
22240 mem := v_2
22241 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22242 break
22243 }
22244 v.reset(OpAMD64SUBQmodify)
22245 v.AuxInt = int32ToAuxInt(off1 + off2)
22246 v.Aux = symToAux(mergeSym(sym1, sym2))
22247 v.AddArg3(base, val, mem)
22248 return true
22249 }
22250 return false
22251 }
22252 func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
22253 v_1 := v.Args[1]
22254 v_0 := v.Args[0]
// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (SUBSDload x [off] {sym} ptr mem)
22258 for {
22259 x := v_0
22260 l := v_1
22261 if l.Op != OpAMD64MOVSDload {
22262 break
22263 }
22264 off := auxIntToInt32(l.AuxInt)
22265 sym := auxToSym(l.Aux)
22266 mem := l.Args[1]
22267 ptr := l.Args[0]
22268 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22269 break
22270 }
22271 v.reset(OpAMD64SUBSDload)
22272 v.AuxInt = int32ToAuxInt(off)
22273 v.Aux = symToAux(sym)
22274 v.AddArg3(x, ptr, mem)
22275 return true
22276 }
22277 return false
22278 }
22279 func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
22280 v_2 := v.Args[2]
22281 v_1 := v.Args[1]
22282 v_0 := v.Args[0]
22283 b := v.Block
22284 typ := &b.Func.Config.Types
// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBSDload [off1+off2] {sym} val base mem)
22288 for {
22289 off1 := auxIntToInt32(v.AuxInt)
22290 sym := auxToSym(v.Aux)
22291 val := v_0
22292 if v_1.Op != OpAMD64ADDQconst {
22293 break
22294 }
22295 off2 := auxIntToInt32(v_1.AuxInt)
22296 base := v_1.Args[0]
22297 mem := v_2
22298 if !(is32Bit(int64(off1) + int64(off2))) {
22299 break
22300 }
22301 v.reset(OpAMD64SUBSDload)
22302 v.AuxInt = int32ToAuxInt(off1 + off2)
22303 v.Aux = symToAux(sym)
22304 v.AddArg3(val, base, mem)
22305 return true
22306 }
// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
22310 for {
22311 off1 := auxIntToInt32(v.AuxInt)
22312 sym1 := auxToSym(v.Aux)
22313 val := v_0
22314 if v_1.Op != OpAMD64LEAQ {
22315 break
22316 }
22317 off2 := auxIntToInt32(v_1.AuxInt)
22318 sym2 := auxToSym(v_1.Aux)
22319 base := v_1.Args[0]
22320 mem := v_2
22321 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22322 break
22323 }
22324 v.reset(OpAMD64SUBSDload)
22325 v.AuxInt = int32ToAuxInt(off1 + off2)
22326 v.Aux = symToAux(mergeSym(sym1, sym2))
22327 v.AddArg3(val, base, mem)
22328 return true
22329 }
// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// result: (SUBSD x (MOVQi2f y))
22332 for {
22333 off := auxIntToInt32(v.AuxInt)
22334 sym := auxToSym(v.Aux)
22335 x := v_0
22336 ptr := v_1
22337 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22338 break
22339 }
22340 y := v_2.Args[1]
22341 if ptr != v_2.Args[0] {
22342 break
22343 }
22344 v.reset(OpAMD64SUBSD)
22345 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
22346 v0.AddArg(y)
22347 v.AddArg2(x, v0)
22348 return true
22349 }
22350 return false
22351 }
22352 func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
22353 v_1 := v.Args[1]
22354 v_0 := v.Args[0]
// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (SUBSSload x [off] {sym} ptr mem)
22358 for {
22359 x := v_0
22360 l := v_1
22361 if l.Op != OpAMD64MOVSSload {
22362 break
22363 }
22364 off := auxIntToInt32(l.AuxInt)
22365 sym := auxToSym(l.Aux)
22366 mem := l.Args[1]
22367 ptr := l.Args[0]
22368 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22369 break
22370 }
22371 v.reset(OpAMD64SUBSSload)
22372 v.AuxInt = int32ToAuxInt(off)
22373 v.Aux = symToAux(sym)
22374 v.AddArg3(x, ptr, mem)
22375 return true
22376 }
22377 return false
22378 }
22379 func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
22380 v_2 := v.Args[2]
22381 v_1 := v.Args[1]
22382 v_0 := v.Args[0]
22383 b := v.Block
22384 typ := &b.Func.Config.Types
// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBSSload [off1+off2] {sym} val base mem)
22388 for {
22389 off1 := auxIntToInt32(v.AuxInt)
22390 sym := auxToSym(v.Aux)
22391 val := v_0
22392 if v_1.Op != OpAMD64ADDQconst {
22393 break
22394 }
22395 off2 := auxIntToInt32(v_1.AuxInt)
22396 base := v_1.Args[0]
22397 mem := v_2
22398 if !(is32Bit(int64(off1) + int64(off2))) {
22399 break
22400 }
22401 v.reset(OpAMD64SUBSSload)
22402 v.AuxInt = int32ToAuxInt(off1 + off2)
22403 v.Aux = symToAux(sym)
22404 v.AddArg3(val, base, mem)
22405 return true
22406 }
// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
22410 for {
22411 off1 := auxIntToInt32(v.AuxInt)
22412 sym1 := auxToSym(v.Aux)
22413 val := v_0
22414 if v_1.Op != OpAMD64LEAQ {
22415 break
22416 }
22417 off2 := auxIntToInt32(v_1.AuxInt)
22418 sym2 := auxToSym(v_1.Aux)
22419 base := v_1.Args[0]
22420 mem := v_2
22421 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22422 break
22423 }
22424 v.reset(OpAMD64SUBSSload)
22425 v.AuxInt = int32ToAuxInt(off1 + off2)
22426 v.Aux = symToAux(mergeSym(sym1, sym2))
22427 v.AddArg3(val, base, mem)
22428 return true
22429 }
// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// result: (SUBSS x (MOVLi2f y))
22432 for {
22433 off := auxIntToInt32(v.AuxInt)
22434 sym := auxToSym(v.Aux)
22435 x := v_0
22436 ptr := v_1
22437 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22438 break
22439 }
22440 y := v_2.Args[1]
22441 if ptr != v_2.Args[0] {
22442 break
22443 }
22444 v.reset(OpAMD64SUBSS)
22445 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
22446 v0.AddArg(y)
22447 v.AddArg2(x, v0)
22448 return true
22449 }
22450 return false
22451 }
22452 func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
22453 v_1 := v.Args[1]
22454 v_0 := v.Args[0]
22455 b := v.Block
// match: (TESTB (MOVLconst [c]) x)
// result: (TESTBconst [int8(c)] x)
22458 for {
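// TESTB is commutative; this inner loop retries the match with the
// two operands swapped on its second pass.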
22459 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22460 if v_0.Op != OpAMD64MOVLconst {
22461 continue
22462 }
22463 c := auxIntToInt32(v_0.AuxInt)
22464 x := v_1
22465 v.reset(OpAMD64TESTBconst)
22466 v.AuxInt = int8ToAuxInt(int8(c))
22467 v.AddArg(x)
22468 return true
22469 }
22470 break
22471 }
// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && clobber(l)
// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
22475 for {
22476 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22477 l := v_0
22478 if l.Op != OpAMD64MOVBload {
22479 continue
22480 }
22481 off := auxIntToInt32(l.AuxInt)
22482 sym := auxToSym(l.Aux)
22483 mem := l.Args[1]
22484 ptr := l.Args[0]
22485 l2 := v_1
22486 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22487 continue
22488 }
22489 b = l.Block
22490 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
22491 v.copyOf(v0)
22492 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22493 v0.Aux = symToAux(sym)
22494 v0.AddArg2(ptr, mem)
22495 return true
22496 }
22497 break
22498 }
22499 return false
22500 }
22501 func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
22502 v_0 := v.Args[0]
// match: (TESTBconst [-1] x)
// cond: x.Op != OpAMD64MOVLconst
// result: (TESTB x x)
22506 for {
22507 if auxIntToInt8(v.AuxInt) != -1 {
22508 break
22509 }
22510 x := v_0
22511 if !(x.Op != OpAMD64MOVLconst) {
22512 break
22513 }
22514 v.reset(OpAMD64TESTB)
22515 v.AddArg2(x, x)
22516 return true
22517 }
22518 return false
22519 }
22520 func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
22521 v_1 := v.Args[1]
22522 v_0 := v.Args[0]
22523 b := v.Block
// match: (TESTL (MOVLconst [c]) x)
// result: (TESTLconst [c] x)
22526 for {
22527 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22528 if v_0.Op != OpAMD64MOVLconst {
22529 continue
22530 }
22531 c := auxIntToInt32(v_0.AuxInt)
22532 x := v_1
22533 v.reset(OpAMD64TESTLconst)
22534 v.AuxInt = int32ToAuxInt(c)
22535 v.AddArg(x)
22536 return true
22537 }
22538 break
22539 }
// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && clobber(l)
// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
22543 for {
22544 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22545 l := v_0
22546 if l.Op != OpAMD64MOVLload {
22547 continue
22548 }
22549 off := auxIntToInt32(l.AuxInt)
22550 sym := auxToSym(l.Aux)
22551 mem := l.Args[1]
22552 ptr := l.Args[0]
22553 l2 := v_1
22554 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22555 continue
22556 }
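// The folded compare is materialized in the load's own block so that it
// can take the load's place there.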
22557 b = l.Block
22558 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
22559 v.copyOf(v0)
22560 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22561 v0.Aux = symToAux(sym)
22562 v0.AddArg2(ptr, mem)
22563 return true
22564 }
22565 break
22566 }
// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
// result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
22570 for {
22571 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22572 a := v_0
22573 if a.Op != OpAMD64ANDLload {
22574 continue
22575 }
22576 off := auxIntToInt32(a.AuxInt)
22577 sym := auxToSym(a.Aux)
22578 mem := a.Args[2]
22579 x := a.Args[0]
22580 ptr := a.Args[1]
22581 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22582 continue
22583 }
22584 v.reset(OpAMD64TESTL)
22585 v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
22586 v0.AuxInt = int32ToAuxInt(off)
22587 v0.Aux = symToAux(sym)
22588 v0.AddArg2(ptr, mem)
22589 v.AddArg2(v0, x)
22590 return true
22591 }
22592 break
22593 }
22594 return false
22595 }
22596 func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
22597 v_0 := v.Args[0]
// match: (TESTLconst [c] (MOVLconst [c]))
// cond: c == 0
// result: (FlagEQ)
22601 for {
22602 c := auxIntToInt32(v.AuxInt)
22603 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
22604 break
22605 }
22606 v.reset(OpAMD64FlagEQ)
22607 return true
22608 }
// match: (TESTLconst [c] (MOVLconst [c]))
// cond: c < 0
// result: (FlagLT_UGT)
22612 for {
22613 c := auxIntToInt32(v.AuxInt)
22614 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
22615 break
22616 }
22617 v.reset(OpAMD64FlagLT_UGT)
22618 return true
22619 }
// match: (TESTLconst [c] (MOVLconst [c]))
// cond: c > 0
// result: (FlagGT_UGT)
22623 for {
22624 c := auxIntToInt32(v.AuxInt)
22625 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
22626 break
22627 }
22628 v.reset(OpAMD64FlagGT_UGT)
22629 return true
22630 }
// match: (TESTLconst [-1] x)
// cond: x.Op != OpAMD64MOVLconst
// result: (TESTL x x)
22634 for {
22635 if auxIntToInt32(v.AuxInt) != -1 {
22636 break
22637 }
22638 x := v_0
22639 if !(x.Op != OpAMD64MOVLconst) {
22640 break
22641 }
22642 v.reset(OpAMD64TESTL)
22643 v.AddArg2(x, x)
22644 return true
22645 }
22646 return false
22647 }
22648 func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
22649 v_1 := v.Args[1]
22650 v_0 := v.Args[0]
22651 b := v.Block
// match: (TESTQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (TESTQconst [int32(c)] x)
22655 for {
22656 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22657 if v_0.Op != OpAMD64MOVQconst {
22658 continue
22659 }
22660 c := auxIntToInt64(v_0.AuxInt)
22661 x := v_1
22662 if !(is32Bit(c)) {
22663 continue
22664 }
22665 v.reset(OpAMD64TESTQconst)
22666 v.AuxInt = int32ToAuxInt(int32(c))
22667 v.AddArg(x)
22668 return true
22669 }
22670 break
22671 }
// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && clobber(l)
// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
22675 for {
22676 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22677 l := v_0
22678 if l.Op != OpAMD64MOVQload {
22679 continue
22680 }
22681 off := auxIntToInt32(l.AuxInt)
22682 sym := auxToSym(l.Aux)
22683 mem := l.Args[1]
22684 ptr := l.Args[0]
22685 l2 := v_1
22686 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22687 continue
22688 }
22689 b = l.Block
22690 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
22691 v.copyOf(v0)
22692 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22693 v0.Aux = symToAux(sym)
22694 v0.AddArg2(ptr, mem)
22695 return true
22696 }
22697 break
22698 }
// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
// result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
22702 for {
22703 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22704 a := v_0
22705 if a.Op != OpAMD64ANDQload {
22706 continue
22707 }
22708 off := auxIntToInt32(a.AuxInt)
22709 sym := auxToSym(a.Aux)
22710 mem := a.Args[2]
22711 x := a.Args[0]
22712 ptr := a.Args[1]
22713 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22714 continue
22715 }
22716 v.reset(OpAMD64TESTQ)
22717 v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
22718 v0.AuxInt = int32ToAuxInt(off)
22719 v0.Aux = symToAux(sym)
22720 v0.AddArg2(ptr, mem)
22721 v.AddArg2(v0, x)
22722 return true
22723 }
22724 break
22725 }
22726 return false
22727 }
22728 func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
22729 v_0 := v.Args[0]
// match: (TESTQconst [c] (MOVQconst [d]))
// cond: int64(c) == d && c == 0
// result: (FlagEQ)
22733 for {
22734 c := auxIntToInt32(v.AuxInt)
22735 if v_0.Op != OpAMD64MOVQconst {
22736 break
22737 }
22738 d := auxIntToInt64(v_0.AuxInt)
22739 if !(int64(c) == d && c == 0) {
22740 break
22741 }
22742 v.reset(OpAMD64FlagEQ)
22743 return true
22744 }
// match: (TESTQconst [c] (MOVQconst [d]))
// cond: int64(c) == d && c < 0
// result: (FlagLT_UGT)
22748 for {
22749 c := auxIntToInt32(v.AuxInt)
22750 if v_0.Op != OpAMD64MOVQconst {
22751 break
22752 }
22753 d := auxIntToInt64(v_0.AuxInt)
22754 if !(int64(c) == d && c < 0) {
22755 break
22756 }
22757 v.reset(OpAMD64FlagLT_UGT)
22758 return true
22759 }
// match: (TESTQconst [c] (MOVQconst [d]))
// cond: int64(c) == d && c > 0
// result: (FlagGT_UGT)
22763 for {
22764 c := auxIntToInt32(v.AuxInt)
22765 if v_0.Op != OpAMD64MOVQconst {
22766 break
22767 }
22768 d := auxIntToInt64(v_0.AuxInt)
22769 if !(int64(c) == d && c > 0) {
22770 break
22771 }
22772 v.reset(OpAMD64FlagGT_UGT)
22773 return true
22774 }
// match: (TESTQconst [-1] x)
// cond: x.Op != OpAMD64MOVQconst
// result: (TESTQ x x)
22778 for {
22779 if auxIntToInt32(v.AuxInt) != -1 {
22780 break
22781 }
22782 x := v_0
22783 if !(x.Op != OpAMD64MOVQconst) {
22784 break
22785 }
22786 v.reset(OpAMD64TESTQ)
22787 v.AddArg2(x, x)
22788 return true
22789 }
22790 return false
22791 }
22792 func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
22793 v_1 := v.Args[1]
22794 v_0 := v.Args[0]
22795 b := v.Block
// match: (TESTW (MOVLconst [c]) x)
// result: (TESTWconst [int16(c)] x)
22798 for {
22799 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22800 if v_0.Op != OpAMD64MOVLconst {
22801 continue
22802 }
22803 c := auxIntToInt32(v_0.AuxInt)
22804 x := v_1
22805 v.reset(OpAMD64TESTWconst)
22806 v.AuxInt = int16ToAuxInt(int16(c))
22807 v.AddArg(x)
22808 return true
22809 }
22810 break
22811 }
// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && clobber(l)
// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
22815 for {
22816 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22817 l := v_0
22818 if l.Op != OpAMD64MOVWload {
22819 continue
22820 }
22821 off := auxIntToInt32(l.AuxInt)
22822 sym := auxToSym(l.Aux)
22823 mem := l.Args[1]
22824 ptr := l.Args[0]
22825 l2 := v_1
22826 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22827 continue
22828 }
22829 b = l.Block
22830 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
22831 v.copyOf(v0)
22832 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22833 v0.Aux = symToAux(sym)
22834 v0.AddArg2(ptr, mem)
22835 return true
22836 }
22837 break
22838 }
22839 return false
22840 }
22841 func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
22842 v_0 := v.Args[0]
// match: (TESTWconst [-1] x)
// cond: x.Op != OpAMD64MOVLconst
// result: (TESTW x x)
22846 for {
22847 if auxIntToInt16(v.AuxInt) != -1 {
22848 break
22849 }
22850 x := v_0
22851 if !(x.Op != OpAMD64MOVLconst) {
22852 break
22853 }
22854 v.reset(OpAMD64TESTW)
22855 v.AddArg2(x, x)
22856 return true
22857 }
22858 return false
22859 }
22860 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
22861 v_2 := v.Args[2]
22862 v_1 := v.Args[1]
22863 v_0 := v.Args[0]
// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XADDLlock [off1+off2] {sym} val ptr mem)
22867 for {
22868 off1 := auxIntToInt32(v.AuxInt)
22869 sym := auxToSym(v.Aux)
22870 val := v_0
22871 if v_1.Op != OpAMD64ADDQconst {
22872 break
22873 }
22874 off2 := auxIntToInt32(v_1.AuxInt)
22875 ptr := v_1.Args[0]
22876 mem := v_2
22877 if !(is32Bit(int64(off1) + int64(off2))) {
22878 break
22879 }
22880 v.reset(OpAMD64XADDLlock)
22881 v.AuxInt = int32ToAuxInt(off1 + off2)
22882 v.Aux = symToAux(sym)
22883 v.AddArg3(val, ptr, mem)
22884 return true
22885 }
22886 return false
22887 }
22888 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
22889 v_2 := v.Args[2]
22890 v_1 := v.Args[1]
22891 v_0 := v.Args[0]
// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XADDQlock [off1+off2] {sym} val ptr mem)
22895 for {
22896 off1 := auxIntToInt32(v.AuxInt)
22897 sym := auxToSym(v.Aux)
22898 val := v_0
22899 if v_1.Op != OpAMD64ADDQconst {
22900 break
22901 }
22902 off2 := auxIntToInt32(v_1.AuxInt)
22903 ptr := v_1.Args[0]
22904 mem := v_2
22905 if !(is32Bit(int64(off1) + int64(off2))) {
22906 break
22907 }
22908 v.reset(OpAMD64XADDQlock)
22909 v.AuxInt = int32ToAuxInt(off1 + off2)
22910 v.Aux = symToAux(sym)
22911 v.AddArg3(val, ptr, mem)
22912 return true
22913 }
22914 return false
22915 }
22916 func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
22917 v_2 := v.Args[2]
22918 v_1 := v.Args[1]
22919 v_0 := v.Args[0]
// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XCHGL [off1+off2] {sym} val ptr mem)
22923 for {
22924 off1 := auxIntToInt32(v.AuxInt)
22925 sym := auxToSym(v.Aux)
22926 val := v_0
22927 if v_1.Op != OpAMD64ADDQconst {
22928 break
22929 }
22930 off2 := auxIntToInt32(v_1.AuxInt)
22931 ptr := v_1.Args[0]
22932 mem := v_2
22933 if !(is32Bit(int64(off1) + int64(off2))) {
22934 break
22935 }
22936 v.reset(OpAMD64XCHGL)
22937 v.AuxInt = int32ToAuxInt(off1 + off2)
22938 v.Aux = symToAux(sym)
22939 v.AddArg3(val, ptr, mem)
22940 return true
22941 }
// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
22945 for {
22946 off1 := auxIntToInt32(v.AuxInt)
22947 sym1 := auxToSym(v.Aux)
22948 val := v_0
22949 if v_1.Op != OpAMD64LEAQ {
22950 break
22951 }
22952 off2 := auxIntToInt32(v_1.AuxInt)
22953 sym2 := auxToSym(v_1.Aux)
22954 ptr := v_1.Args[0]
22955 mem := v_2
22956 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
22957 break
22958 }
22959 v.reset(OpAMD64XCHGL)
22960 v.AuxInt = int32ToAuxInt(off1 + off2)
22961 v.Aux = symToAux(mergeSym(sym1, sym2))
22962 v.AddArg3(val, ptr, mem)
22963 return true
22964 }
22965 return false
22966 }
22967 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
22968 v_2 := v.Args[2]
22969 v_1 := v.Args[1]
22970 v_0 := v.Args[0]
// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XCHGQ [off1+off2] {sym} val ptr mem)
22974 for {
22975 off1 := auxIntToInt32(v.AuxInt)
22976 sym := auxToSym(v.Aux)
22977 val := v_0
22978 if v_1.Op != OpAMD64ADDQconst {
22979 break
22980 }
22981 off2 := auxIntToInt32(v_1.AuxInt)
22982 ptr := v_1.Args[0]
22983 mem := v_2
22984 if !(is32Bit(int64(off1) + int64(off2))) {
22985 break
22986 }
22987 v.reset(OpAMD64XCHGQ)
22988 v.AuxInt = int32ToAuxInt(off1 + off2)
22989 v.Aux = symToAux(sym)
22990 v.AddArg3(val, ptr, mem)
22991 return true
22992 }
// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
22996 for {
22997 off1 := auxIntToInt32(v.AuxInt)
22998 sym1 := auxToSym(v.Aux)
22999 val := v_0
23000 if v_1.Op != OpAMD64LEAQ {
23001 break
23002 }
23003 off2 := auxIntToInt32(v_1.AuxInt)
23004 sym2 := auxToSym(v_1.Aux)
23005 ptr := v_1.Args[0]
23006 mem := v_2
23007 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
23008 break
23009 }
23010 v.reset(OpAMD64XCHGQ)
23011 v.AuxInt = int32ToAuxInt(off1 + off2)
23012 v.Aux = symToAux(mergeSym(sym1, sym2))
23013 v.AddArg3(val, ptr, mem)
23014 return true
23015 }
23016 return false
23017 }
23018 func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
23019 v_1 := v.Args[1]
23020 v_0 := v.Args[0]
// match: (XORL (SHLL (MOVLconst [1]) y) x)
// result: (BTCL x y)
23023 for {
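// XOR against a single shifted-in 1 bit toggles exactly that bit,
// which BTCL (bit test and complement) does directly.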
23024 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23025 if v_0.Op != OpAMD64SHLL {
23026 continue
23027 }
23028 y := v_0.Args[1]
23029 v_0_0 := v_0.Args[0]
23030 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
23031 continue
23032 }
23033 x := v_1
23034 v.reset(OpAMD64BTCL)
23035 v.AddArg2(x, y)
23036 return true
23037 }
23038 break
23039 }
// match: (XORL x (MOVLconst [c]))
// result: (XORLconst [c] x)
23042 for {
23043 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23044 x := v_0
23045 if v_1.Op != OpAMD64MOVLconst {
23046 continue
23047 }
23048 c := auxIntToInt32(v_1.AuxInt)
23049 v.reset(OpAMD64XORLconst)
23050 v.AuxInt = int32ToAuxInt(c)
23051 v.AddArg(x)
23052 return true
23053 }
23054 break
23055 }
// match: (XORL x x)
// result: (MOVLconst [0])
23058 for {
23059 x := v_0
23060 if x != v_1 {
23061 break
23062 }
23063 v.reset(OpAMD64MOVLconst)
23064 v.AuxInt = int32ToAuxInt(0)
23065 return true
23066 }
// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (XORLload x [off] {sym} ptr mem)
23070 for {
23071 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23072 x := v_0
23073 l := v_1
23074 if l.Op != OpAMD64MOVLload {
23075 continue
23076 }
23077 off := auxIntToInt32(l.AuxInt)
23078 sym := auxToSym(l.Aux)
23079 mem := l.Args[1]
23080 ptr := l.Args[0]
23081 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23082 continue
23083 }
23084 v.reset(OpAMD64XORLload)
23085 v.AuxInt = int32ToAuxInt(off)
23086 v.Aux = symToAux(sym)
23087 v.AddArg3(x, ptr, mem)
23088 return true
23089 }
23090 break
23091 }
// match: (XORL x (ADDLconst [-1] x))
// cond: buildcfg.GOAMD64 >= 3
// result: (BLSMSKL x)
23095 for {
23096 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23097 x := v_0
23098 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23099 continue
23100 }
23101 v.reset(OpAMD64BLSMSKL)
23102 v.AddArg(x)
23103 return true
23104 }
23105 break
23106 }
23107 return false
23108 }
23109 func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
23110 v_0 := v.Args[0]
// match: (XORLconst [1] (SETNE x))
// result: (SETEQ x)
23113 for {
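// SETcc results are always 0 or 1, so XORing with 1 inverts the
// predicate; the rules below flip each condition to its complement.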
23114 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
23115 break
23116 }
23117 x := v_0.Args[0]
23118 v.reset(OpAMD64SETEQ)
23119 v.AddArg(x)
23120 return true
23121 }
// match: (XORLconst [1] (SETEQ x))
// result: (SETNE x)
23124 for {
23125 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
23126 break
23127 }
23128 x := v_0.Args[0]
23129 v.reset(OpAMD64SETNE)
23130 v.AddArg(x)
23131 return true
23132 }
// match: (XORLconst [1] (SETL x))
// result: (SETGE x)
23135 for {
23136 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
23137 break
23138 }
23139 x := v_0.Args[0]
23140 v.reset(OpAMD64SETGE)
23141 v.AddArg(x)
23142 return true
23143 }
// match: (XORLconst [1] (SETGE x))
// result: (SETL x)
23146 for {
23147 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
23148 break
23149 }
23150 x := v_0.Args[0]
23151 v.reset(OpAMD64SETL)
23152 v.AddArg(x)
23153 return true
23154 }
// match: (XORLconst [1] (SETLE x))
// result: (SETG x)
23157 for {
23158 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
23159 break
23160 }
23161 x := v_0.Args[0]
23162 v.reset(OpAMD64SETG)
23163 v.AddArg(x)
23164 return true
23165 }
// match: (XORLconst [1] (SETG x))
// result: (SETLE x)
23168 for {
23169 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
23170 break
23171 }
23172 x := v_0.Args[0]
23173 v.reset(OpAMD64SETLE)
23174 v.AddArg(x)
23175 return true
23176 }
// match: (XORLconst [1] (SETB x))
// result: (SETAE x)
23179 for {
23180 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
23181 break
23182 }
23183 x := v_0.Args[0]
23184 v.reset(OpAMD64SETAE)
23185 v.AddArg(x)
23186 return true
23187 }
// match: (XORLconst [1] (SETAE x))
// result: (SETB x)
23190 for {
23191 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
23192 break
23193 }
23194 x := v_0.Args[0]
23195 v.reset(OpAMD64SETB)
23196 v.AddArg(x)
23197 return true
23198 }
// match: (XORLconst [1] (SETBE x))
// result: (SETA x)
23201 for {
23202 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
23203 break
23204 }
23205 x := v_0.Args[0]
23206 v.reset(OpAMD64SETA)
23207 v.AddArg(x)
23208 return true
23209 }
// match: (XORLconst [1] (SETA x))
// result: (SETBE x)
23212 for {
23213 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
23214 break
23215 }
23216 x := v_0.Args[0]
23217 v.reset(OpAMD64SETBE)
23218 v.AddArg(x)
23219 return true
23220 }
// match: (XORLconst [c] (XORLconst [d] x))
// result: (XORLconst [c ^ d] x)
23223 for {
23224 c := auxIntToInt32(v.AuxInt)
23225 if v_0.Op != OpAMD64XORLconst {
23226 break
23227 }
23228 d := auxIntToInt32(v_0.AuxInt)
23229 x := v_0.Args[0]
23230 v.reset(OpAMD64XORLconst)
23231 v.AuxInt = int32ToAuxInt(c ^ d)
23232 v.AddArg(x)
23233 return true
23234 }
// match: (XORLconst [c] x)
// cond: c==0
// result: x
23238 for {
23239 c := auxIntToInt32(v.AuxInt)
23240 x := v_0
23241 if !(c == 0) {
23242 break
23243 }
23244 v.copyOf(x)
23245 return true
23246 }
// match: (XORLconst [c] (MOVLconst [d]))
// result: (MOVLconst [c^d])
23249 for {
23250 c := auxIntToInt32(v.AuxInt)
23251 if v_0.Op != OpAMD64MOVLconst {
23252 break
23253 }
23254 d := auxIntToInt32(v_0.AuxInt)
23255 v.reset(OpAMD64MOVLconst)
23256 v.AuxInt = int32ToAuxInt(c ^ d)
23257 return true
23258 }
23259 return false
23260 }
23261 func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
23262 v_1 := v.Args[1]
23263 v_0 := v.Args[0]
// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
23267 for {
23268 valoff1 := auxIntToValAndOff(v.AuxInt)
23269 sym := auxToSym(v.Aux)
23270 if v_0.Op != OpAMD64ADDQconst {
23271 break
23272 }
23273 off2 := auxIntToInt32(v_0.AuxInt)
23274 base := v_0.Args[0]
23275 mem := v_1
23276 if !(ValAndOff(valoff1).canAdd32(off2)) {
23277 break
23278 }
23279 v.reset(OpAMD64XORLconstmodify)
23280 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23281 v.Aux = symToAux(sym)
23282 v.AddArg2(base, mem)
23283 return true
23284 }
// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
23288 for {
23289 valoff1 := auxIntToValAndOff(v.AuxInt)
23290 sym1 := auxToSym(v.Aux)
23291 if v_0.Op != OpAMD64LEAQ {
23292 break
23293 }
23294 off2 := auxIntToInt32(v_0.AuxInt)
23295 sym2 := auxToSym(v_0.Aux)
23296 base := v_0.Args[0]
23297 mem := v_1
23298 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23299 break
23300 }
23301 v.reset(OpAMD64XORLconstmodify)
23302 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23303 v.Aux = symToAux(mergeSym(sym1, sym2))
23304 v.AddArg2(base, mem)
23305 return true
23306 }
23307 return false
23308 }
23309 func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
23310 v_2 := v.Args[2]
23311 v_1 := v.Args[1]
23312 v_0 := v.Args[0]
23313 b := v.Block
23314 typ := &b.Func.Config.Types
// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XORLload [off1+off2] {sym} val base mem)
23318 for {
23319 off1 := auxIntToInt32(v.AuxInt)
23320 sym := auxToSym(v.Aux)
23321 val := v_0
23322 if v_1.Op != OpAMD64ADDQconst {
23323 break
23324 }
23325 off2 := auxIntToInt32(v_1.AuxInt)
23326 base := v_1.Args[0]
23327 mem := v_2
23328 if !(is32Bit(int64(off1) + int64(off2))) {
23329 break
23330 }
23331 v.reset(OpAMD64XORLload)
23332 v.AuxInt = int32ToAuxInt(off1 + off2)
23333 v.Aux = symToAux(sym)
23334 v.AddArg3(val, base, mem)
23335 return true
23336 }
// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
23340 for {
23341 off1 := auxIntToInt32(v.AuxInt)
23342 sym1 := auxToSym(v.Aux)
23343 val := v_0
23344 if v_1.Op != OpAMD64LEAQ {
23345 break
23346 }
23347 off2 := auxIntToInt32(v_1.AuxInt)
23348 sym2 := auxToSym(v_1.Aux)
23349 base := v_1.Args[0]
23350 mem := v_2
23351 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23352 break
23353 }
23354 v.reset(OpAMD64XORLload)
23355 v.AuxInt = int32ToAuxInt(off1 + off2)
23356 v.Aux = symToAux(mergeSym(sym1, sym2))
23357 v.AddArg3(val, base, mem)
23358 return true
23359 }
// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (XORL x (MOVLf2i y))
23362 for {
23363 off := auxIntToInt32(v.AuxInt)
23364 sym := auxToSym(v.Aux)
23365 x := v_0
23366 ptr := v_1
23367 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23368 break
23369 }
23370 y := v_2.Args[1]
23371 if ptr != v_2.Args[0] {
23372 break
23373 }
23374 v.reset(OpAMD64XORL)
23375 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
23376 v0.AddArg(y)
23377 v.AddArg2(x, v0)
23378 return true
23379 }
23380 return false
23381 }
23382 func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
23383 v_2 := v.Args[2]
23384 v_1 := v.Args[1]
23385 v_0 := v.Args[0]
// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XORLmodify [off1+off2] {sym} base val mem)
23389 for {
23390 off1 := auxIntToInt32(v.AuxInt)
23391 sym := auxToSym(v.Aux)
23392 if v_0.Op != OpAMD64ADDQconst {
23393 break
23394 }
23395 off2 := auxIntToInt32(v_0.AuxInt)
23396 base := v_0.Args[0]
23397 val := v_1
23398 mem := v_2
23399 if !(is32Bit(int64(off1) + int64(off2))) {
23400 break
23401 }
23402 v.reset(OpAMD64XORLmodify)
23403 v.AuxInt = int32ToAuxInt(off1 + off2)
23404 v.Aux = symToAux(sym)
23405 v.AddArg3(base, val, mem)
23406 return true
23407 }
// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23411 for {
23412 off1 := auxIntToInt32(v.AuxInt)
23413 sym1 := auxToSym(v.Aux)
23414 if v_0.Op != OpAMD64LEAQ {
23415 break
23416 }
23417 off2 := auxIntToInt32(v_0.AuxInt)
23418 sym2 := auxToSym(v_0.Aux)
23419 base := v_0.Args[0]
23420 val := v_1
23421 mem := v_2
23422 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23423 break
23424 }
23425 v.reset(OpAMD64XORLmodify)
23426 v.AuxInt = int32ToAuxInt(off1 + off2)
23427 v.Aux = symToAux(mergeSym(sym1, sym2))
23428 v.AddArg3(base, val, mem)
23429 return true
23430 }
23431 return false
23432 }
23433 func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
23434 v_1 := v.Args[1]
23435 v_0 := v.Args[0]
// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
// result: (BTCQ x y)
23438 for {
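// As with XORL above, XOR against 1<<y toggles bit y; BTCQ performs
// the toggle in one instruction.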
23439 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23440 if v_0.Op != OpAMD64SHLQ {
23441 continue
23442 }
23443 y := v_0.Args[1]
23444 v_0_0 := v_0.Args[0]
23445 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
23446 continue
23447 }
23448 x := v_1
23449 v.reset(OpAMD64BTCQ)
23450 v.AddArg2(x, y)
23451 return true
23452 }
23453 break
23454 }
// match: (XORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
// result: (BTCQconst [int8(log64(c))] x)
23458 for {
23459 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23460 if v_0.Op != OpAMD64MOVQconst {
23461 continue
23462 }
23463 c := auxIntToInt64(v_0.AuxInt)
23464 x := v_1
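// Power-of-two constants below 1<<31 fit in an immediate and are left
// to the cheaper XORQconst rule below; only wide masks need BTCQconst.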
23465 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
23466 continue
23467 }
23468 v.reset(OpAMD64BTCQconst)
23469 v.AuxInt = int8ToAuxInt(int8(log64(c)))
23470 v.AddArg(x)
23471 return true
23472 }
23473 break
23474 }
// match: (XORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (XORQconst [int32(c)] x)
23478 for {
23479 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23480 x := v_0
23481 if v_1.Op != OpAMD64MOVQconst {
23482 continue
23483 }
23484 c := auxIntToInt64(v_1.AuxInt)
23485 if !(is32Bit(c)) {
23486 continue
23487 }
23488 v.reset(OpAMD64XORQconst)
23489 v.AuxInt = int32ToAuxInt(int32(c))
23490 v.AddArg(x)
23491 return true
23492 }
23493 break
23494 }
// match: (XORQ x x)
// result: (MOVQconst [0])
23497 for {
23498 x := v_0
23499 if x != v_1 {
23500 break
23501 }
23502 v.reset(OpAMD64MOVQconst)
23503 v.AuxInt = int64ToAuxInt(0)
23504 return true
23505 }
// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (XORQload x [off] {sym} ptr mem)
23509 for {
23510 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23511 x := v_0
23512 l := v_1
23513 if l.Op != OpAMD64MOVQload {
23514 continue
23515 }
23516 off := auxIntToInt32(l.AuxInt)
23517 sym := auxToSym(l.Aux)
23518 mem := l.Args[1]
23519 ptr := l.Args[0]
23520 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23521 continue
23522 }
23523 v.reset(OpAMD64XORQload)
23524 v.AuxInt = int32ToAuxInt(off)
23525 v.Aux = symToAux(sym)
23526 v.AddArg3(x, ptr, mem)
23527 return true
23528 }
23529 break
23530 }
// match: (XORQ x (ADDQconst [-1] x))
// cond: buildcfg.GOAMD64 >= 3
// result: (BLSMSKQ x)
23534 for {
23535 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23536 x := v_0
23537 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23538 continue
23539 }
23540 v.reset(OpAMD64BLSMSKQ)
23541 v.AddArg(x)
23542 return true
23543 }
23544 break
23545 }
23546 return false
23547 }
23548 func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
23549 v_0 := v.Args[0]
// match: (XORQconst [c] (XORQconst [d] x))
// result: (XORQconst [c ^ d] x)
23552 for {
23553 c := auxIntToInt32(v.AuxInt)
23554 if v_0.Op != OpAMD64XORQconst {
23555 break
23556 }
23557 d := auxIntToInt32(v_0.AuxInt)
23558 x := v_0.Args[0]
23559 v.reset(OpAMD64XORQconst)
23560 v.AuxInt = int32ToAuxInt(c ^ d)
23561 v.AddArg(x)
23562 return true
23563 }
// match: (XORQconst [0] x)
// result: x
23566 for {
23567 if auxIntToInt32(v.AuxInt) != 0 {
23568 break
23569 }
23570 x := v_0
23571 v.copyOf(x)
23572 return true
23573 }
// match: (XORQconst [c] (MOVQconst [d]))
// result: (MOVQconst [int64(c)^d])
23576 for {
23577 c := auxIntToInt32(v.AuxInt)
23578 if v_0.Op != OpAMD64MOVQconst {
23579 break
23580 }
23581 d := auxIntToInt64(v_0.AuxInt)
23582 v.reset(OpAMD64MOVQconst)
23583 v.AuxInt = int64ToAuxInt(int64(c) ^ d)
23584 return true
23585 }
23586 return false
23587 }
23588 func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
23589 v_1 := v.Args[1]
23590 v_0 := v.Args[0]
// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
23594 for {
23595 valoff1 := auxIntToValAndOff(v.AuxInt)
23596 sym := auxToSym(v.Aux)
23597 if v_0.Op != OpAMD64ADDQconst {
23598 break
23599 }
23600 off2 := auxIntToInt32(v_0.AuxInt)
23601 base := v_0.Args[0]
23602 mem := v_1
23603 if !(ValAndOff(valoff1).canAdd32(off2)) {
23604 break
23605 }
23606 v.reset(OpAMD64XORQconstmodify)
23607 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23608 v.Aux = symToAux(sym)
23609 v.AddArg2(base, mem)
23610 return true
23611 }
// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
23615 for {
23616 valoff1 := auxIntToValAndOff(v.AuxInt)
23617 sym1 := auxToSym(v.Aux)
23618 if v_0.Op != OpAMD64LEAQ {
23619 break
23620 }
23621 off2 := auxIntToInt32(v_0.AuxInt)
23622 sym2 := auxToSym(v_0.Aux)
23623 base := v_0.Args[0]
23624 mem := v_1
23625 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23626 break
23627 }
23628 v.reset(OpAMD64XORQconstmodify)
23629 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23630 v.Aux = symToAux(mergeSym(sym1, sym2))
23631 v.AddArg2(base, mem)
23632 return true
23633 }
23634 return false
23635 }
23636 func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
23637 v_2 := v.Args[2]
23638 v_1 := v.Args[1]
23639 v_0 := v.Args[0]
23640 b := v.Block
23641 typ := &b.Func.Config.Types
// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XORQload [off1+off2] {sym} val base mem)
23645 for {
23646 off1 := auxIntToInt32(v.AuxInt)
23647 sym := auxToSym(v.Aux)
23648 val := v_0
23649 if v_1.Op != OpAMD64ADDQconst {
23650 break
23651 }
23652 off2 := auxIntToInt32(v_1.AuxInt)
23653 base := v_1.Args[0]
23654 mem := v_2
23655 if !(is32Bit(int64(off1) + int64(off2))) {
23656 break
23657 }
23658 v.reset(OpAMD64XORQload)
23659 v.AuxInt = int32ToAuxInt(off1 + off2)
23660 v.Aux = symToAux(sym)
23661 v.AddArg3(val, base, mem)
23662 return true
23663 }
// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
23667 for {
23668 off1 := auxIntToInt32(v.AuxInt)
23669 sym1 := auxToSym(v.Aux)
23670 val := v_0
23671 if v_1.Op != OpAMD64LEAQ {
23672 break
23673 }
23674 off2 := auxIntToInt32(v_1.AuxInt)
23675 sym2 := auxToSym(v_1.Aux)
23676 base := v_1.Args[0]
23677 mem := v_2
23678 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23679 break
23680 }
23681 v.reset(OpAMD64XORQload)
23682 v.AuxInt = int32ToAuxInt(off1 + off2)
23683 v.Aux = symToAux(mergeSym(sym1, sym2))
23684 v.AddArg3(val, base, mem)
23685 return true
23686 }
// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (XORQ x (MOVQf2i y))
23689 for {
23690 off := auxIntToInt32(v.AuxInt)
23691 sym := auxToSym(v.Aux)
23692 x := v_0
23693 ptr := v_1
23694 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23695 break
23696 }
23697 y := v_2.Args[1]
23698 if ptr != v_2.Args[0] {
23699 break
23700 }
23701 v.reset(OpAMD64XORQ)
23702 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
23703 v0.AddArg(y)
23704 v.AddArg2(x, v0)
23705 return true
23706 }
23707 return false
23708 }
23709 func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
23710 v_2 := v.Args[2]
23711 v_1 := v.Args[1]
23712 v_0 := v.Args[0]
// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XORQmodify [off1+off2] {sym} base val mem)
23716 for {
23717 off1 := auxIntToInt32(v.AuxInt)
23718 sym := auxToSym(v.Aux)
23719 if v_0.Op != OpAMD64ADDQconst {
23720 break
23721 }
23722 off2 := auxIntToInt32(v_0.AuxInt)
23723 base := v_0.Args[0]
23724 val := v_1
23725 mem := v_2
23726 if !(is32Bit(int64(off1) + int64(off2))) {
23727 break
23728 }
23729 v.reset(OpAMD64XORQmodify)
23730 v.AuxInt = int32ToAuxInt(off1 + off2)
23731 v.Aux = symToAux(sym)
23732 v.AddArg3(base, val, mem)
23733 return true
23734 }
// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23738 for {
23739 off1 := auxIntToInt32(v.AuxInt)
23740 sym1 := auxToSym(v.Aux)
23741 if v_0.Op != OpAMD64LEAQ {
23742 break
23743 }
23744 off2 := auxIntToInt32(v_0.AuxInt)
23745 sym2 := auxToSym(v_0.Aux)
23746 base := v_0.Args[0]
23747 val := v_1
23748 mem := v_2
23749 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23750 break
23751 }
23752 v.reset(OpAMD64XORQmodify)
23753 v.AuxInt = int32ToAuxInt(off1 + off2)
23754 v.Aux = symToAux(mergeSym(sym1, sym2))
23755 v.AddArg3(base, val, mem)
23756 return true
23757 }
23758 return false
23759 }
23760 func rewriteValueAMD64_OpAddr(v *Value) bool {
23761 v_0 := v.Args[0]
// match: (Addr {sym} base)
// result: (LEAQ {sym} base)
23764 for {
23765 sym := auxToSym(v.Aux)
23766 base := v_0
23767 v.reset(OpAMD64LEAQ)
23768 v.Aux = symToAux(sym)
23769 v.AddArg(base)
23770 return true
23771 }
23772 }
23773 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
23774 v_2 := v.Args[2]
23775 v_1 := v.Args[1]
23776 v_0 := v.Args[0]
23777 b := v.Block
23778 typ := &b.Func.Config.Types
// match: (AtomicAdd32 ptr val mem)
// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
23781 for {
23782 ptr := v_0
23783 val := v_1
23784 mem := v_2
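// XADDLlock returns the value memory held before the add; AddTupleFirst32
// re-adds val to that result so callers observe the new value.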
23785 v.reset(OpAMD64AddTupleFirst32)
23786 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
23787 v0.AddArg3(val, ptr, mem)
23788 v.AddArg2(val, v0)
23789 return true
23790 }
23791 }
23792 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
23793 v_2 := v.Args[2]
23794 v_1 := v.Args[1]
23795 v_0 := v.Args[0]
23796 b := v.Block
23797 typ := &b.Func.Config.Types
// match: (AtomicAdd64 ptr val mem)
// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
23800 for {
23801 ptr := v_0
23802 val := v_1
23803 mem := v_2
23804 v.reset(OpAMD64AddTupleFirst64)
23805 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
23806 v0.AddArg3(val, ptr, mem)
23807 v.AddArg2(val, v0)
23808 return true
23809 }
23810 }
23811 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
23812 v_2 := v.Args[2]
23813 v_1 := v.Args[1]
23814 v_0 := v.Args[0]
// match: (AtomicAnd32 ptr val mem)
// result: (ANDLlock ptr val mem)
23817 for {
23818 ptr := v_0
23819 val := v_1
23820 mem := v_2
23821 v.reset(OpAMD64ANDLlock)
23822 v.AddArg3(ptr, val, mem)
23823 return true
23824 }
23825 }
23826 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
23827 v_2 := v.Args[2]
23828 v_1 := v.Args[1]
23829 v_0 := v.Args[0]
// match: (AtomicAnd8 ptr val mem)
// result: (ANDBlock ptr val mem)
23832 for {
23833 ptr := v_0
23834 val := v_1
23835 mem := v_2
23836 v.reset(OpAMD64ANDBlock)
23837 v.AddArg3(ptr, val, mem)
23838 return true
23839 }
23840 }
23841 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
23842 v_3 := v.Args[3]
23843 v_2 := v.Args[2]
23844 v_1 := v.Args[1]
23845 v_0 := v.Args[0]
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// result: (CMPXCHGLlock ptr old new_ mem)
23848 for {
23849 ptr := v_0
23850 old := v_1
23851 new_ := v_2
23852 mem := v_3
23853 v.reset(OpAMD64CMPXCHGLlock)
23854 v.AddArg4(ptr, old, new_, mem)
23855 return true
23856 }
23857 }
23858 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
23859 v_3 := v.Args[3]
23860 v_2 := v.Args[2]
23861 v_1 := v.Args[1]
23862 v_0 := v.Args[0]
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// result: (CMPXCHGQlock ptr old new_ mem)
23865 for {
23866 ptr := v_0
23867 old := v_1
23868 new_ := v_2
23869 mem := v_3
23870 v.reset(OpAMD64CMPXCHGQlock)
23871 v.AddArg4(ptr, old, new_, mem)
23872 return true
23873 }
23874 }
23875 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
23876 v_2 := v.Args[2]
23877 v_1 := v.Args[1]
23878 v_0 := v.Args[0]
// match: (AtomicExchange32 ptr val mem)
// result: (XCHGL val ptr mem)
23881 for {
23882 ptr := v_0
23883 val := v_1
23884 mem := v_2
23885 v.reset(OpAMD64XCHGL)
23886 v.AddArg3(val, ptr, mem)
23887 return true
23888 }
23889 }
23890 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
23891 v_2 := v.Args[2]
23892 v_1 := v.Args[1]
23893 v_0 := v.Args[0]
// match: (AtomicExchange64 ptr val mem)
// result: (XCHGQ val ptr mem)
23896 for {
23897 ptr := v_0
23898 val := v_1
23899 mem := v_2
23900 v.reset(OpAMD64XCHGQ)
23901 v.AddArg3(val, ptr, mem)
23902 return true
23903 }
23904 }
23905 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
23906 v_1 := v.Args[1]
23907 v_0 := v.Args[0]
// match: (AtomicLoad32 ptr mem)
// result: (MOVLatomicload ptr mem)
23910 for {
23911 ptr := v_0
23912 mem := v_1
23913 v.reset(OpAMD64MOVLatomicload)
23914 v.AddArg2(ptr, mem)
23915 return true
23916 }
23917 }
23918 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
23919 v_1 := v.Args[1]
23920 v_0 := v.Args[0]
// match: (AtomicLoad64 ptr mem)
// result: (MOVQatomicload ptr mem)
23923 for {
23924 ptr := v_0
23925 mem := v_1
23926 v.reset(OpAMD64MOVQatomicload)
23927 v.AddArg2(ptr, mem)
23928 return true
23929 }
23930 }
23931 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
23932 v_1 := v.Args[1]
23933 v_0 := v.Args[0]
// match: (AtomicLoad8 ptr mem)
// result: (MOVBatomicload ptr mem)
23936 for {
23937 ptr := v_0
23938 mem := v_1
23939 v.reset(OpAMD64MOVBatomicload)
23940 v.AddArg2(ptr, mem)
23941 return true
23942 }
23943 }
23944 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
23945 v_1 := v.Args[1]
23946 v_0 := v.Args[0]
// match: (AtomicLoadPtr ptr mem)
// result: (MOVQatomicload ptr mem)
23949 for {
23950 ptr := v_0
23951 mem := v_1
23952 v.reset(OpAMD64MOVQatomicload)
23953 v.AddArg2(ptr, mem)
23954 return true
23955 }
23956 }
23957 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
23958 v_2 := v.Args[2]
23959 v_1 := v.Args[1]
23960 v_0 := v.Args[0]
// match: (AtomicOr32 ptr val mem)
// result: (ORLlock ptr val mem)
23963 for {
23964 ptr := v_0
23965 val := v_1
23966 mem := v_2
23967 v.reset(OpAMD64ORLlock)
23968 v.AddArg3(ptr, val, mem)
23969 return true
23970 }
23971 }
23972 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
23973 v_2 := v.Args[2]
23974 v_1 := v.Args[1]
23975 v_0 := v.Args[0]
// match: (AtomicOr8 ptr val mem)
// result: (ORBlock ptr val mem)
23978 for {
23979 ptr := v_0
23980 val := v_1
23981 mem := v_2
23982 v.reset(OpAMD64ORBlock)
23983 v.AddArg3(ptr, val, mem)
23984 return true
23985 }
23986 }
23987 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
23988 v_2 := v.Args[2]
23989 v_1 := v.Args[1]
23990 v_0 := v.Args[0]
23991 b := v.Block
23992 typ := &b.Func.Config.Types
// match: (AtomicStore32 ptr val mem)
// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
23995 for {
23996 ptr := v_0
23997 val := v_1
23998 mem := v_2
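// An atomic store is modeled as an exchange whose old value is ignored:
// Select1 keeps only the memory result of the XCHGL.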
23999 v.reset(OpSelect1)
24000 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
24001 v0.AddArg3(val, ptr, mem)
24002 v.AddArg(v0)
24003 return true
24004 }
24005 }
24006 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
24007 v_2 := v.Args[2]
24008 v_1 := v.Args[1]
24009 v_0 := v.Args[0]
24010 b := v.Block
24011 typ := &b.Func.Config.Types
// match: (AtomicStore64 ptr val mem)
// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
24014 for {
24015 ptr := v_0
24016 val := v_1
24017 mem := v_2
24018 v.reset(OpSelect1)
24019 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
24020 v0.AddArg3(val, ptr, mem)
24021 v.AddArg(v0)
24022 return true
24023 }
24024 }
24025 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
24026 v_2 := v.Args[2]
24027 v_1 := v.Args[1]
24028 v_0 := v.Args[0]
24029 b := v.Block
24030 typ := &b.Func.Config.Types
// match: (AtomicStore8 ptr val mem)
// result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
24033 for {
24034 ptr := v_0
24035 val := v_1
24036 mem := v_2
24037 v.reset(OpSelect1)
24038 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
24039 v0.AddArg3(val, ptr, mem)
24040 v.AddArg(v0)
24041 return true
24042 }
24043 }
24044 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
24045 v_2 := v.Args[2]
24046 v_1 := v.Args[1]
24047 v_0 := v.Args[0]
24048 b := v.Block
24049 typ := &b.Func.Config.Types
// match: (AtomicStorePtrNoWB ptr val mem)
// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
24052 for {
24053 ptr := v_0
24054 val := v_1
24055 mem := v_2
24056 v.reset(OpSelect1)
24057 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
24058 v0.AddArg3(val, ptr, mem)
24059 v.AddArg(v0)
24060 return true
24061 }
24062 }
24063 func rewriteValueAMD64_OpBitLen16(v *Value) bool {
24064 v_0 := v.Args[0]
24065 b := v.Block
24066 typ := &b.Func.Config.Types
// match: (BitLen16 x)
// cond: buildcfg.GOAMD64 < 3
// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
24070 for {
24071 x := v_0
24072 if !(buildcfg.GOAMD64 < 3) {
24073 break
24074 }
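// BSR is undefined on a zero input, so the input is mapped to 2*x+1
// (never zero) via LEAL1; bsr(2*x+1) equals bitlen(x) for all x.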
24075 v.reset(OpAMD64BSRL)
24076 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24077 v0.AuxInt = int32ToAuxInt(1)
24078 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
24079 v1.AddArg(x)
24080 v0.AddArg2(v1, v1)
24081 v.AddArg(v0)
24082 return true
24083 }
// match: (BitLen16 <t> x)
// cond: buildcfg.GOAMD64 >= 3
// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
24087 for {
24088 t := v.Type
24089 x := v_0
24090 if !(buildcfg.GOAMD64 >= 3) {
24091 break
24092 }
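// With LZCNT available, bitlen(x) == 32 - lzcnt(zero-extended x),
// computed here as NEG(ADD(-32, LZCNT)).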
24093 v.reset(OpAMD64NEGQ)
24094 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24095 v0.AuxInt = int32ToAuxInt(-32)
24096 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24097 v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
24098 v2.AddArg(x)
24099 v1.AddArg(v2)
24100 v0.AddArg(v1)
24101 v.AddArg(v0)
24102 return true
24103 }
24104 return false
24105 }
24106 func rewriteValueAMD64_OpBitLen32(v *Value) bool {
24107 v_0 := v.Args[0]
24108 b := v.Block
24109 typ := &b.Func.Config.Types
// match: (BitLen32 x)
// cond: buildcfg.GOAMD64 < 3
// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
24113 for {
24114 x := v_0
24115 if !(buildcfg.GOAMD64 < 3) {
24116 break
24117 }
24118 v.reset(OpSelect0)
24119 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24120 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
24121 v1.AuxInt = int32ToAuxInt(1)
24122 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
24123 v2.AddArg(x)
24124 v1.AddArg2(v2, v2)
24125 v0.AddArg(v1)
24126 v.AddArg(v0)
24127 return true
24128 }
// match: (BitLen32 <t> x)
// cond: buildcfg.GOAMD64 >= 3
// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
24132 for {
24133 t := v.Type
24134 x := v_0
24135 if !(buildcfg.GOAMD64 >= 3) {
24136 break
24137 }
24138 v.reset(OpAMD64NEGQ)
24139 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24140 v0.AuxInt = int32ToAuxInt(-32)
24141 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24142 v1.AddArg(x)
24143 v0.AddArg(v1)
24144 v.AddArg(v0)
24145 return true
24146 }
24147 return false
24148 }
24149 func rewriteValueAMD64_OpBitLen64(v *Value) bool {
24150 v_0 := v.Args[0]
24151 b := v.Block
24152 typ := &b.Func.Config.Types
24153
24154
24155
24156 for {
24157 t := v.Type
24158 x := v_0
24159 if !(buildcfg.GOAMD64 < 3) {
24160 break
24161 }
24162 v.reset(OpAMD64ADDQconst)
24163 v.AuxInt = int32ToAuxInt(1)
24164 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
24165 v1 := b.NewValue0(v.Pos, OpSelect0, t)
24166 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24167 v2.AddArg(x)
24168 v1.AddArg(v2)
24169 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
24170 v3.AuxInt = int64ToAuxInt(-1)
24171 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
24172 v4.AddArg(v2)
24173 v0.AddArg3(v1, v3, v4)
24174 v.AddArg(v0)
24175 return true
24176 }
24177
24178
24179
24180 for {
24181 t := v.Type
24182 x := v_0
24183 if !(buildcfg.GOAMD64 >= 3) {
24184 break
24185 }
24186 v.reset(OpAMD64NEGQ)
24187 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24188 v0.AuxInt = int32ToAuxInt(-64)
24189 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
24190 v1.AddArg(x)
24191 v0.AddArg(v1)
24192 v.AddArg(v0)
24193 return true
24194 }
24195 return false
24196 }
24197 func rewriteValueAMD64_OpBitLen8(v *Value) bool {
24198 v_0 := v.Args[0]
24199 b := v.Block
24200 typ := &b.Func.Config.Types
24201
24202
24203
24204 for {
24205 x := v_0
24206 if !(buildcfg.GOAMD64 < 3) {
24207 break
24208 }
24209 v.reset(OpAMD64BSRL)
24210 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24211 v0.AuxInt = int32ToAuxInt(1)
24212 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
24213 v1.AddArg(x)
24214 v0.AddArg2(v1, v1)
24215 v.AddArg(v0)
24216 return true
24217 }
24218
24219
24220
24221 for {
24222 t := v.Type
24223 x := v_0
24224 if !(buildcfg.GOAMD64 >= 3) {
24225 break
24226 }
24227 v.reset(OpAMD64NEGQ)
24228 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24229 v0.AuxInt = int32ToAuxInt(-32)
24230 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24231 v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
24232 v2.AddArg(x)
24233 v1.AddArg(v2)
24234 v0.AddArg(v1)
24235 v.AddArg(v0)
24236 return true
24237 }
24238 return false
24239 }
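// Bswap16 is just a 16-bit rotate by 8. Ceil here and Floor further below
// use the SSE4.1 ROUNDSD instruction; its immediate selects the rounding
// mode (1 = toward -Inf for Floor, 2 = toward +Inf for Ceil).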
func rewriteValueAMD64_OpBswap16(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Bswap16 x)
	// result: (ROLWconst [8] x)
	for {
		x := v_0
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(8)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCeil(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ceil x)
	// result: (ROUNDSD [2] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(2)
		v.AddArg(x)
		return true
	}
}
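// CondSelect lowering. CondSelect x y c yields x when c is true and y
// otherwise, while the machine CMOV ops move their second argument when the
// flags satisfy the predicate; that is why every rule below passes the
// operands swapped, as (y, x). Conditions already materialized as flags (the
// SET* producers) feed CMOV directly; a plain boolean check is first
// zero-extended to 64 bits and then compared against zero to regenerate
// flags (the final rules of this function).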
func rewriteValueAMD64_OpCondSelect(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is64BitInt(t) || isPtr(t)
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is32BitInt(t)
	// result: (CMOVLHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is16BitInt(t)
	// result: (CMOVWHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 1
	// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 2
	// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 4
	// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
	// result: (CMOVLNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
	// result: (CMOVWNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt16(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [c])
	// result: (MOVLconst [b2i32(c)])
	for {
		c := auxIntToBool(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(b2i32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	// match: (ConstNil)
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
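// Ctz16 and Ctz8 OR in a bit just above the operand width (1<<16 or 1<<8)
// before BSF, so the BSF input is never zero and the result is the operand
// width when all low bits are clear -- exactly Go's Ctz semantics for 0.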
func rewriteValueAMD64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = int8ToAuxInt(32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
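// For Ctz64 without TZCNT there is no spare 65th bit to set, so the BSFQ
// tuple is used twice: Select0 supplies the count, Select1 supplies the
// flags, and CMOVQEQ substitutes the constant 64 when BSFQ saw zero input.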
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v3.AddArg(v1)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ x))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (ORLconst <typ.UInt32> [1<<8] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
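// The DIV* machine ops return a (quotient, remainder) tuple: Div* takes
// Select0 and Mod* (further below) takes Select1 of the same op. The bool
// AuxInt on the signed forms is carried through unchanged, and 8-bit
// operands are widened to 16 bits first, since the SSA op set has no
// byte-sized divide.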
func rewriteValueAMD64_OpDiv16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [a] x y)
	// result: (Select0 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (Select0 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32 [a] x y)
	// result: (Select0 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32u x y)
	// result: (Select0 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64 [a] x y)
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64u x y)
	// result: (Select0 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq16 x y)
	// result: (SETEQ (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32 x y)
	// result: (SETEQ (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
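// Floating-point equality goes through UCOMISS/UCOMISD and the SETEQF/SETNEF
// ops, which fold in the parity flag so an unordered (NaN) comparison reads
// as not-equal, matching Go semantics.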
func rewriteValueAMD64_OpEq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32F x y)
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64 x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64F x y)
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq8 x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqB x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqPtr x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpFMA(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (FMA x y z)
	// result: (VFMADD231SD z x y)
	for {
		x := v_0
		y := v_1
		z := v_2
		v.reset(OpAMD64VFMADD231SD)
		v.AddArg3(z, x, y)
		return true
	}
}
func rewriteValueAMD64_OpFloor(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Floor x)
	// result: (ROUNDSD [1] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
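// GetG needs a lowering only when the function does not use the internal
// ABI; under ABIInternal the g pointer is kept in a dedicated register (R14
// on amd64), so no load is emitted.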
func rewriteValueAMD64_OpGetG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (GetG mem)
	// cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
	// result: (LoweredGetG mem)
	for {
		mem := v_0
		if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
			break
		}
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
	return false
}
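// HasCPUFeature loads the runtime feature variable named by the aux symbol
// and tests it against zero, yielding the boolean via SETNE.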
func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (HasCPUFeature {s})
	// result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
	for {
		s := auxToSym(v.Aux)
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
		v1.Aux = symToAux(s)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (SETB (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsNonNil p)
	// result: (SETNE (TESTQ p p))
	for {
		p := v_0
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg2(p, p)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16 x y)
	// result: (SETLE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16U x y)
	// result: (SETBE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32 x y)
	// result: (SETLE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
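// Floating-point Less/Leq swap their operands: x < y is evaluated as y > x
// (SETGF) and x <= y as y >= x (SETGEF). Working through the "greater" flag
// combinations lets the *F set ops report false on unordered (NaN) inputs,
// as Go requires.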
func rewriteValueAMD64_OpLeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32F x y)
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32U x y)
	// result: (SETBE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64 x y)
	// result: (SETLE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64F x y)
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64U x y)
	// result: (SETBE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8 x y)
	// result: (SETLE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8U x y)
	// result: (SETBE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16 x y)
	// result: (SETL (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16U x y)
	// result: (SETB (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32 x y)
	// result: (SETL (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32F x y)
	// result: (SETGF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32U x y)
	// result: (SETB (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64 x y)
	// result: (SETL (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64F x y)
	// result: (SETGF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64U x y)
	// result: (SETB (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8 x y)
	// result: (SETL (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8U x y)
	// result: (SETB (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
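// Load dispatches purely on the result type: 8-byte integers and pointers
// become MOVQload, down through MOVBload for booleans and 8-bit ints, with
// MOVSSload/MOVSDload for the two float widths.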
func rewriteValueAMD64_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Load <t> ptr mem)
	// cond: is64BitInt(t) || isPtr(t)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitInt(t)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: t.IsBoolean() || is8BitInt(t)
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (LocalAddr <t> {sym} base mem)
	// cond: t.Elem().HasPointers()
	// result: (LEAQ {sym} (SPanchored base mem))
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		mem := v_1
		if !(t.Elem().HasPointers()) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
		v0.AddArg2(base, mem)
		v.AddArg(v0)
		return true
	}
	// match: (LocalAddr <t> {sym} base _)
	// cond: !t.Elem().HasPointers()
	// result: (LEAQ {sym} base)
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		if !(!t.Elem().HasPointers()) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
	return false
}
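// Unbounded shifts must yield 0 once the count reaches the operand width,
// but the hardware shift masks its count. Each rule below therefore ANDs
// the raw shift with SBB*carrymask(CMP count, width): the compare sets the
// carry flag only while count < width, making the mask all ones then and
// all zeros otherwise. When shiftIsBounded(v) proves count < width, the
// plain shift suffices. The compared width is that of the shift instruction
// (32 for SHLL, 64 for SHLQ), not of the narrow operand.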
func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
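// Max is lowered through Min by negating the inputs and the result:
// max(x, y) = -min(-x, -y). Negation flips only the sign bit, which also
// gives max the required ordering of -0 below +0 and preserves NaN inputs.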
func rewriteValueAMD64_OpMax32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Max32F <t> x y)
	// result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpNeg32F)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpMin32F, t)
		v1 := b.NewValue0(v.Pos, OpNeg32F, t)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpNeg32F, t)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMax64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Max64F <t> x y)
	// result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpNeg64F)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpMin64F, t)
		v1 := b.NewValue0(v.Pos, OpNeg64F, t)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpNeg64F, t)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
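// Min computes MINSS/MINSD twice and ORs the two results together. The x86
// MIN instructions return their second operand when the inputs compare
// unordered or equal (including -0 == +0), so the bare instruction alone
// does not propagate NaN or prefer -0; OR-ing min(min(x,y), x) with
// min(x,y) restores both properties.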
26914 func rewriteValueAMD64_OpMin32F(v *Value) bool {
26915 v_1 := v.Args[1]
26916 v_0 := v.Args[0]
26917 b := v.Block
26918
26919
26920 for {
26921 t := v.Type
26922 x := v_0
26923 y := v_1
26924 v.reset(OpAMD64POR)
26925 v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
26926 v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
26927 v1.AddArg2(x, y)
26928 v0.AddArg2(v1, x)
26929 v.AddArg2(v0, v1)
26930 return true
26931 }
26932 }
26933 func rewriteValueAMD64_OpMin64F(v *Value) bool {
26934 v_1 := v.Args[1]
26935 v_0 := v.Args[0]
26936 b := v.Block
26937
26938
26939 for {
26940 t := v.Type
26941 x := v_0
26942 y := v_1
26943 v.reset(OpAMD64POR)
26944 v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
26945 v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
26946 v1.AddArg2(x, y)
26947 v0.AddArg2(v1, x)
26948 v.AddArg2(v0, v1)
26949 return true
26950 }
26951 }
26952 func rewriteValueAMD64_OpMod16(v *Value) bool {
26953 v_1 := v.Args[1]
26954 v_0 := v.Args[0]
26955 b := v.Block
26956 typ := &b.Func.Config.Types
26957
26958
26959 for {
26960 a := auxIntToBool(v.AuxInt)
26961 x := v_0
26962 y := v_1
26963 v.reset(OpSelect1)
26964 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
26965 v0.AuxInt = boolToAuxInt(a)
26966 v0.AddArg2(x, y)
26967 v.AddArg(v0)
26968 return true
26969 }
26970 }
26971 func rewriteValueAMD64_OpMod16u(v *Value) bool {
26972 v_1 := v.Args[1]
26973 v_0 := v.Args[0]
26974 b := v.Block
26975 typ := &b.Func.Config.Types
26976
26977
26978 for {
26979 x := v_0
26980 y := v_1
26981 v.reset(OpSelect1)
26982 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
26983 v0.AddArg2(x, y)
26984 v.AddArg(v0)
26985 return true
26986 }
26987 }
26988 func rewriteValueAMD64_OpMod32(v *Value) bool {
26989 v_1 := v.Args[1]
26990 v_0 := v.Args[0]
26991 b := v.Block
26992 typ := &b.Func.Config.Types
26993
26994
26995 for {
26996 a := auxIntToBool(v.AuxInt)
26997 x := v_0
26998 y := v_1
26999 v.reset(OpSelect1)
27000 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
27001 v0.AuxInt = boolToAuxInt(a)
27002 v0.AddArg2(x, y)
27003 v.AddArg(v0)
27004 return true
27005 }
27006 }
27007 func rewriteValueAMD64_OpMod32u(v *Value) bool {
27008 v_1 := v.Args[1]
27009 v_0 := v.Args[0]
27010 b := v.Block
27011 typ := &b.Func.Config.Types
27012
27013
27014 for {
27015 x := v_0
27016 y := v_1
27017 v.reset(OpSelect1)
27018 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
27019 v0.AddArg2(x, y)
27020 v.AddArg(v0)
27021 return true
27022 }
27023 }
27024 func rewriteValueAMD64_OpMod64(v *Value) bool {
27025 v_1 := v.Args[1]
27026 v_0 := v.Args[0]
27027 b := v.Block
27028 typ := &b.Func.Config.Types
27029
27030
27031 for {
27032 a := auxIntToBool(v.AuxInt)
27033 x := v_0
27034 y := v_1
27035 v.reset(OpSelect1)
27036 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
27037 v0.AuxInt = boolToAuxInt(a)
27038 v0.AddArg2(x, y)
27039 v.AddArg(v0)
27040 return true
27041 }
27042 }
27043 func rewriteValueAMD64_OpMod64u(v *Value) bool {
27044 v_1 := v.Args[1]
27045 v_0 := v.Args[0]
27046 b := v.Block
27047 typ := &b.Func.Config.Types
27048 // match: (Mod64u x y)
27049 // result: (Select1 (DIVQU x y))
27050 for {
27051 x := v_0
27052 y := v_1
27053 v.reset(OpSelect1)
27054 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
27055 v0.AddArg2(x, y)
27056 v.AddArg(v0)
27057 return true
27058 }
27059 }
27060 func rewriteValueAMD64_OpMod8(v *Value) bool {
27061 v_1 := v.Args[1]
27062 v_0 := v.Args[0]
27063 b := v.Block
27064 typ := &b.Func.Config.Types
27065 // match: (Mod8 x y)
27066 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
27067 for {
27068 x := v_0
27069 y := v_1
27070 v.reset(OpSelect1)
27071 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27072 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27073 v1.AddArg(x)
27074 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27075 v2.AddArg(y)
27076 v0.AddArg2(v1, v2)
27077 v.AddArg(v0)
27078 return true
27079 }
27080 }
27081 func rewriteValueAMD64_OpMod8u(v *Value) bool {
27082 v_1 := v.Args[1]
27083 v_0 := v.Args[0]
27084 b := v.Block
27085 typ := &b.Func.Config.Types
27086 // match: (Mod8u x y)
27087 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
27088 for {
27089 x := v_0
27090 y := v_1
27091 v.reset(OpSelect1)
27092 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27093 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27094 v1.AddArg(x)
27095 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27096 v2.AddArg(y)
27097 v0.AddArg2(v1, v2)
27098 v.AddArg(v0)
27099 return true
27100 }
27101 }
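// OpMove lowering, roughly in order of size: copies of 0-12 bytes become one
// or two load/store pairs (odd sizes use two overlapping accesses); 13-15
// bytes use two overlapping MOVQs; 16-64 bytes use 128-bit MOVO (when SSE is
// enabled) or MOVQ pairs, recursing through smaller Moves; larger multiples
// of 16 go through DUFFCOPY up to 16*64 bytes; and anything bigger (or any
// build with Duff's device disabled) falls back to REP MOVSQ.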
27102 func rewriteValueAMD64_OpMove(v *Value) bool {
27103 v_2 := v.Args[2]
27104 v_1 := v.Args[1]
27105 v_0 := v.Args[0]
27106 b := v.Block
27107 config := b.Func.Config
27108 typ := &b.Func.Config.Types
27109 // match: (Move [0] _ _ mem)
27110 // result: mem
27111 for {
27112 if auxIntToInt64(v.AuxInt) != 0 {
27113 break
27114 }
27115 mem := v_2
27116 v.copyOf(mem)
27117 return true
27118 }
27119 // match: (Move [1] dst src mem)
27120 // result: (MOVBstore dst (MOVBload src mem) mem)
27121 for {
27122 if auxIntToInt64(v.AuxInt) != 1 {
27123 break
27124 }
27125 dst := v_0
27126 src := v_1
27127 mem := v_2
27128 v.reset(OpAMD64MOVBstore)
27129 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27130 v0.AddArg2(src, mem)
27131 v.AddArg3(dst, v0, mem)
27132 return true
27133 }
27134 // match: (Move [2] dst src mem)
27135 // result: (MOVWstore dst (MOVWload src mem) mem)
27136 for {
27137 if auxIntToInt64(v.AuxInt) != 2 {
27138 break
27139 }
27140 dst := v_0
27141 src := v_1
27142 mem := v_2
27143 v.reset(OpAMD64MOVWstore)
27144 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27145 v0.AddArg2(src, mem)
27146 v.AddArg3(dst, v0, mem)
27147 return true
27148 }
27149 // match: (Move [4] dst src mem)
27150 // result: (MOVLstore dst (MOVLload src mem) mem)
27151 for {
27152 if auxIntToInt64(v.AuxInt) != 4 {
27153 break
27154 }
27155 dst := v_0
27156 src := v_1
27157 mem := v_2
27158 v.reset(OpAMD64MOVLstore)
27159 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27160 v0.AddArg2(src, mem)
27161 v.AddArg3(dst, v0, mem)
27162 return true
27163 }
27164 // match: (Move [8] dst src mem)
27165 // result: (MOVQstore dst (MOVQload src mem) mem)
27166 for {
27167 if auxIntToInt64(v.AuxInt) != 8 {
27168 break
27169 }
27170 dst := v_0
27171 src := v_1
27172 mem := v_2
27173 v.reset(OpAMD64MOVQstore)
27174 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27175 v0.AddArg2(src, mem)
27176 v.AddArg3(dst, v0, mem)
27177 return true
27178 }
27179 // match: (Move [16] dst src mem)
27180 // cond: config.useSSE
27181 // result: (MOVOstore dst (MOVOload src mem) mem)
27182 for {
27183 if auxIntToInt64(v.AuxInt) != 16 {
27184 break
27185 }
27186 dst := v_0
27187 src := v_1
27188 mem := v_2
27189 if !(config.useSSE) {
27190 break
27191 }
27192 v.reset(OpAMD64MOVOstore)
27193 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27194 v0.AddArg2(src, mem)
27195 v.AddArg3(dst, v0, mem)
27196 return true
27197 }
27198 // match: (Move [16] dst src mem)
27199 // cond: !config.useSSE
27200 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27201 for {
27202 if auxIntToInt64(v.AuxInt) != 16 {
27203 break
27204 }
27205 dst := v_0
27206 src := v_1
27207 mem := v_2
27208 if !(!config.useSSE) {
27209 break
27210 }
27211 v.reset(OpAMD64MOVQstore)
27212 v.AuxInt = int32ToAuxInt(8)
27213 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27214 v0.AuxInt = int32ToAuxInt(8)
27215 v0.AddArg2(src, mem)
27216 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27217 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27218 v2.AddArg2(src, mem)
27219 v1.AddArg3(dst, v2, mem)
27220 v.AddArg3(dst, v0, v1)
27221 return true
27222 }
27223 // match: (Move [32] dst src mem)
27224 // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
27225 for {
27226 if auxIntToInt64(v.AuxInt) != 32 {
27227 break
27228 }
27229 dst := v_0
27230 src := v_1
27231 mem := v_2
27232 v.reset(OpMove)
27233 v.AuxInt = int64ToAuxInt(16)
27234 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27235 v0.AuxInt = int64ToAuxInt(16)
27236 v0.AddArg(dst)
27237 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27238 v1.AuxInt = int64ToAuxInt(16)
27239 v1.AddArg(src)
27240 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27241 v2.AuxInt = int64ToAuxInt(16)
27242 v2.AddArg3(dst, src, mem)
27243 v.AddArg3(v0, v1, v2)
27244 return true
27245 }
27246 // match: (Move [48] dst src mem)
27247 // cond: config.useSSE
27248 // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
27249 for {
27250 if auxIntToInt64(v.AuxInt) != 48 {
27251 break
27252 }
27253 dst := v_0
27254 src := v_1
27255 mem := v_2
27256 if !(config.useSSE) {
27257 break
27258 }
27259 v.reset(OpMove)
27260 v.AuxInt = int64ToAuxInt(32)
27261 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27262 v0.AuxInt = int64ToAuxInt(16)
27263 v0.AddArg(dst)
27264 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27265 v1.AuxInt = int64ToAuxInt(16)
27266 v1.AddArg(src)
27267 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27268 v2.AuxInt = int64ToAuxInt(16)
27269 v2.AddArg3(dst, src, mem)
27270 v.AddArg3(v0, v1, v2)
27271 return true
27272 }
27273 // match: (Move [64] dst src mem)
27274 // cond: config.useSSE
27275 // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
27276 for {
27277 if auxIntToInt64(v.AuxInt) != 64 {
27278 break
27279 }
27280 dst := v_0
27281 src := v_1
27282 mem := v_2
27283 if !(config.useSSE) {
27284 break
27285 }
27286 v.reset(OpMove)
27287 v.AuxInt = int64ToAuxInt(32)
27288 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27289 v0.AuxInt = int64ToAuxInt(32)
27290 v0.AddArg(dst)
27291 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27292 v1.AuxInt = int64ToAuxInt(32)
27293 v1.AddArg(src)
27294 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27295 v2.AuxInt = int64ToAuxInt(32)
27296 v2.AddArg3(dst, src, mem)
27297 v.AddArg3(v0, v1, v2)
27298 return true
27299 }
27300 // match: (Move [3] dst src mem)
27301 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
27302 for {
27303 if auxIntToInt64(v.AuxInt) != 3 {
27304 break
27305 }
27306 dst := v_0
27307 src := v_1
27308 mem := v_2
27309 v.reset(OpAMD64MOVBstore)
27310 v.AuxInt = int32ToAuxInt(2)
27311 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27312 v0.AuxInt = int32ToAuxInt(2)
27313 v0.AddArg2(src, mem)
27314 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
27315 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27316 v2.AddArg2(src, mem)
27317 v1.AddArg3(dst, v2, mem)
27318 v.AddArg3(dst, v0, v1)
27319 return true
27320 }
27321 // match: (Move [5] dst src mem)
27322 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
27323 for {
27324 if auxIntToInt64(v.AuxInt) != 5 {
27325 break
27326 }
27327 dst := v_0
27328 src := v_1
27329 mem := v_2
27330 v.reset(OpAMD64MOVBstore)
27331 v.AuxInt = int32ToAuxInt(4)
27332 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27333 v0.AuxInt = int32ToAuxInt(4)
27334 v0.AddArg2(src, mem)
27335 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27336 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27337 v2.AddArg2(src, mem)
27338 v1.AddArg3(dst, v2, mem)
27339 v.AddArg3(dst, v0, v1)
27340 return true
27341 }
27342 // match: (Move [6] dst src mem)
27343 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
27344 for {
27345 if auxIntToInt64(v.AuxInt) != 6 {
27346 break
27347 }
27348 dst := v_0
27349 src := v_1
27350 mem := v_2
27351 v.reset(OpAMD64MOVWstore)
27352 v.AuxInt = int32ToAuxInt(4)
27353 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27354 v0.AuxInt = int32ToAuxInt(4)
27355 v0.AddArg2(src, mem)
27356 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27357 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27358 v2.AddArg2(src, mem)
27359 v1.AddArg3(dst, v2, mem)
27360 v.AddArg3(dst, v0, v1)
27361 return true
27362 }
27363 // match: (Move [7] dst src mem)
27364 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
27365 for {
27366 if auxIntToInt64(v.AuxInt) != 7 {
27367 break
27368 }
27369 dst := v_0
27370 src := v_1
27371 mem := v_2
27372 v.reset(OpAMD64MOVLstore)
27373 v.AuxInt = int32ToAuxInt(3)
27374 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27375 v0.AuxInt = int32ToAuxInt(3)
27376 v0.AddArg2(src, mem)
27377 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27378 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27379 v2.AddArg2(src, mem)
27380 v1.AddArg3(dst, v2, mem)
27381 v.AddArg3(dst, v0, v1)
27382 return true
27383 }
27384 // match: (Move [9] dst src mem)
27385 // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27386 for {
27387 if auxIntToInt64(v.AuxInt) != 9 {
27388 break
27389 }
27390 dst := v_0
27391 src := v_1
27392 mem := v_2
27393 v.reset(OpAMD64MOVBstore)
27394 v.AuxInt = int32ToAuxInt(8)
27395 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27396 v0.AuxInt = int32ToAuxInt(8)
27397 v0.AddArg2(src, mem)
27398 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27399 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27400 v2.AddArg2(src, mem)
27401 v1.AddArg3(dst, v2, mem)
27402 v.AddArg3(dst, v0, v1)
27403 return true
27404 }
27405 // match: (Move [10] dst src mem)
27406 // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27407 for {
27408 if auxIntToInt64(v.AuxInt) != 10 {
27409 break
27410 }
27411 dst := v_0
27412 src := v_1
27413 mem := v_2
27414 v.reset(OpAMD64MOVWstore)
27415 v.AuxInt = int32ToAuxInt(8)
27416 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27417 v0.AuxInt = int32ToAuxInt(8)
27418 v0.AddArg2(src, mem)
27419 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27420 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27421 v2.AddArg2(src, mem)
27422 v1.AddArg3(dst, v2, mem)
27423 v.AddArg3(dst, v0, v1)
27424 return true
27425 }
27426 // match: (Move [11] dst src mem)
27427 // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
27428 for {
27429 if auxIntToInt64(v.AuxInt) != 11 {
27430 break
27431 }
27432 dst := v_0
27433 src := v_1
27434 mem := v_2
27435 v.reset(OpAMD64MOVLstore)
27436 v.AuxInt = int32ToAuxInt(7)
27437 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27438 v0.AuxInt = int32ToAuxInt(7)
27439 v0.AddArg2(src, mem)
27440 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27441 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27442 v2.AddArg2(src, mem)
27443 v1.AddArg3(dst, v2, mem)
27444 v.AddArg3(dst, v0, v1)
27445 return true
27446 }
27447 // match: (Move [12] dst src mem)
27448 // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27449 for {
27450 if auxIntToInt64(v.AuxInt) != 12 {
27451 break
27452 }
27453 dst := v_0
27454 src := v_1
27455 mem := v_2
27456 v.reset(OpAMD64MOVLstore)
27457 v.AuxInt = int32ToAuxInt(8)
27458 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27459 v0.AuxInt = int32ToAuxInt(8)
27460 v0.AddArg2(src, mem)
27461 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27462 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27463 v2.AddArg2(src, mem)
27464 v1.AddArg3(dst, v2, mem)
27465 v.AddArg3(dst, v0, v1)
27466 return true
27467 }
27468 // match: (Move [s] dst src mem)
27469 // cond: s >= 13 && s <= 15
27470 // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
27471 for {
27472 s := auxIntToInt64(v.AuxInt)
27473 dst := v_0
27474 src := v_1
27475 mem := v_2
27476 if !(s >= 13 && s <= 15) {
27477 break
27478 }
27479 v.reset(OpAMD64MOVQstore)
27480 v.AuxInt = int32ToAuxInt(int32(s - 8))
27481 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27482 v0.AuxInt = int32ToAuxInt(int32(s - 8))
27483 v0.AddArg2(src, mem)
27484 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27485 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27486 v2.AddArg2(src, mem)
27487 v1.AddArg3(dst, v2, mem)
27488 v.AddArg3(dst, v0, v1)
27489 return true
27490 }
27491 // match: (Move [s] dst src mem)
27492 // cond: s > 16 && s%16 != 0 && s%16 <= 8
27493 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
27494 for {
27495 s := auxIntToInt64(v.AuxInt)
27496 dst := v_0
27497 src := v_1
27498 mem := v_2
27499 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
27500 break
27501 }
27502 v.reset(OpMove)
27503 v.AuxInt = int64ToAuxInt(s - s%16)
27504 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27505 v0.AuxInt = int64ToAuxInt(s % 16)
27506 v0.AddArg(dst)
27507 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27508 v1.AuxInt = int64ToAuxInt(s % 16)
27509 v1.AddArg(src)
27510 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27511 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27512 v3.AddArg2(src, mem)
27513 v2.AddArg3(dst, v3, mem)
27514 v.AddArg3(v0, v1, v2)
27515 return true
27516 }
27517 // match: (Move [s] dst src mem)
27518 // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
27519 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
27520 for {
27521 s := auxIntToInt64(v.AuxInt)
27522 dst := v_0
27523 src := v_1
27524 mem := v_2
27525 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
27526 break
27527 }
27528 v.reset(OpMove)
27529 v.AuxInt = int64ToAuxInt(s - s%16)
27530 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27531 v0.AuxInt = int64ToAuxInt(s % 16)
27532 v0.AddArg(dst)
27533 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27534 v1.AuxInt = int64ToAuxInt(s % 16)
27535 v1.AddArg(src)
27536 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
27537 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27538 v3.AddArg2(src, mem)
27539 v2.AddArg3(dst, v3, mem)
27540 v.AddArg3(v0, v1, v2)
27541 return true
27542 }
27543 // match: (Move [s] dst src mem)
27544 // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
27545 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
27546 for {
27547 s := auxIntToInt64(v.AuxInt)
27548 dst := v_0
27549 src := v_1
27550 mem := v_2
27551 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
27552 break
27553 }
27554 v.reset(OpMove)
27555 v.AuxInt = int64ToAuxInt(s - s%16)
27556 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27557 v0.AuxInt = int64ToAuxInt(s % 16)
27558 v0.AddArg(dst)
27559 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27560 v1.AuxInt = int64ToAuxInt(s % 16)
27561 v1.AddArg(src)
27562 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27563 v2.AuxInt = int32ToAuxInt(8)
27564 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27565 v3.AuxInt = int32ToAuxInt(8)
27566 v3.AddArg2(src, mem)
27567 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27568 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27569 v5.AddArg2(src, mem)
27570 v4.AddArg3(dst, v5, mem)
27571 v2.AddArg3(dst, v3, v4)
27572 v.AddArg3(v0, v1, v2)
27573 return true
27574 }
27575 // match: (Move [s] dst src mem)
27576 // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
27577 // result: (DUFFCOPY [s] dst src mem)
27578 for {
27579 s := auxIntToInt64(v.AuxInt)
27580 dst := v_0
27581 src := v_1
27582 mem := v_2
27583 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
27584 break
27585 }
27586 v.reset(OpAMD64DUFFCOPY)
27587 v.AuxInt = int64ToAuxInt(s)
27588 v.AddArg3(dst, src, mem)
27589 return true
27590 }
27591 // match: (Move [s] dst src mem)
27592 // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
27593 // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
27594 for {
27595 s := auxIntToInt64(v.AuxInt)
27596 dst := v_0
27597 src := v_1
27598 mem := v_2
27599 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
27600 break
27601 }
27602 v.reset(OpAMD64REPMOVSQ)
27603 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27604 v0.AuxInt = int64ToAuxInt(s / 8)
27605 v.AddArg4(dst, src, v0, mem)
27606 return true
27607 }
27608 return false
27609 }
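// Floating-point negation is lowered to an XOR against a constant with only
// the sign bit set (math.Copysign(0, -1) is -0): PXOR flips the sign bit and
// leaves every other bit of the operand untouched, which also preserves NaN
// payloads.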
27610 func rewriteValueAMD64_OpNeg32F(v *Value) bool {
27611 v_0 := v.Args[0]
27612 b := v.Block
27613 typ := &b.Func.Config.Types
27614 // match: (Neg32F x)
27615 // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
27616 for {
27617 x := v_0
27618 v.reset(OpAMD64PXOR)
27619 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
27620 v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
27621 v.AddArg2(x, v0)
27622 return true
27623 }
27624 }
27625 func rewriteValueAMD64_OpNeg64F(v *Value) bool {
27626 v_0 := v.Args[0]
27627 b := v.Block
27628 typ := &b.Func.Config.Types
27629 // match: (Neg64F x)
27630 // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
27631 for {
27632 x := v_0
27633 v.reset(OpAMD64PXOR)
27634 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
27635 v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
27636 v.AddArg2(x, v0)
27637 return true
27638 }
27639 }
27640 func rewriteValueAMD64_OpNeq16(v *Value) bool {
27641 v_1 := v.Args[1]
27642 v_0 := v.Args[0]
27643 b := v.Block
27644 // match: (Neq16 x y)
27645 // result: (SETNE (CMPW x y))
27646 for {
27647 x := v_0
27648 y := v_1
27649 v.reset(OpAMD64SETNE)
27650 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
27651 v0.AddArg2(x, y)
27652 v.AddArg(v0)
27653 return true
27654 }
27655 }
27656 func rewriteValueAMD64_OpNeq32(v *Value) bool {
27657 v_1 := v.Args[1]
27658 v_0 := v.Args[0]
27659 b := v.Block
27660 // match: (Neq32 x y)
27661 // result: (SETNE (CMPL x y))
27662 for {
27663 x := v_0
27664 y := v_1
27665 v.reset(OpAMD64SETNE)
27666 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
27667 v0.AddArg2(x, y)
27668 v.AddArg(v0)
27669 return true
27670 }
27671 }
27672 func rewriteValueAMD64_OpNeq32F(v *Value) bool {
27673 v_1 := v.Args[1]
27674 v_0 := v.Args[0]
27675 b := v.Block
27676 // match: (Neq32F x y)
27677 // result: (SETNEF (UCOMISS x y))
27678 for {
27679 x := v_0
27680 y := v_1
27681 v.reset(OpAMD64SETNEF)
27682 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
27683 v0.AddArg2(x, y)
27684 v.AddArg(v0)
27685 return true
27686 }
27687 }
27688 func rewriteValueAMD64_OpNeq64(v *Value) bool {
27689 v_1 := v.Args[1]
27690 v_0 := v.Args[0]
27691 b := v.Block
27692 // match: (Neq64 x y)
27693 // result: (SETNE (CMPQ x y))
27694 for {
27695 x := v_0
27696 y := v_1
27697 v.reset(OpAMD64SETNE)
27698 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27699 v0.AddArg2(x, y)
27700 v.AddArg(v0)
27701 return true
27702 }
27703 }
27704 func rewriteValueAMD64_OpNeq64F(v *Value) bool {
27705 v_1 := v.Args[1]
27706 v_0 := v.Args[0]
27707 b := v.Block
27708 // match: (Neq64F x y)
27709 // result: (SETNEF (UCOMISD x y))
27710 for {
27711 x := v_0
27712 y := v_1
27713 v.reset(OpAMD64SETNEF)
27714 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
27715 v0.AddArg2(x, y)
27716 v.AddArg(v0)
27717 return true
27718 }
27719 }
27720 func rewriteValueAMD64_OpNeq8(v *Value) bool {
27721 v_1 := v.Args[1]
27722 v_0 := v.Args[0]
27723 b := v.Block
27724 // match: (Neq8 x y)
27725 // result: (SETNE (CMPB x y))
27726 for {
27727 x := v_0
27728 y := v_1
27729 v.reset(OpAMD64SETNE)
27730 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27731 v0.AddArg2(x, y)
27732 v.AddArg(v0)
27733 return true
27734 }
27735 }
27736 func rewriteValueAMD64_OpNeqB(v *Value) bool {
27737 v_1 := v.Args[1]
27738 v_0 := v.Args[0]
27739 b := v.Block
27740 // match: (NeqB x y)
27741 // result: (SETNE (CMPB x y))
27742 for {
27743 x := v_0
27744 y := v_1
27745 v.reset(OpAMD64SETNE)
27746 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27747 v0.AddArg2(x, y)
27748 v.AddArg(v0)
27749 return true
27750 }
27751 }
27752 func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
27753 v_1 := v.Args[1]
27754 v_0 := v.Args[0]
27755 b := v.Block
27756 // match: (NeqPtr x y)
27757 // result: (SETNE (CMPQ x y))
27758 for {
27759 x := v_0
27760 y := v_1
27761 v.reset(OpAMD64SETNE)
27762 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27763 v0.AddArg2(x, y)
27764 v.AddArg(v0)
27765 return true
27766 }
27767 }
27768 func rewriteValueAMD64_OpNot(v *Value) bool {
27769 v_0 := v.Args[0]
27770 // match: (Not x)
27771 // result: (XORLconst [1] x)
27772 for {
27773 x := v_0
27774 v.reset(OpAMD64XORLconst)
27775 v.AuxInt = int32ToAuxInt(1)
27776 v.AddArg(x)
27777 return true
27778 }
27779 }
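// OffPtr prefers ADDQconst, which carries its offset in a 32-bit immediate;
// only offsets that fail the is32Bit check need an extra MOVQconst to
// materialize the full 64-bit offset.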
27780 func rewriteValueAMD64_OpOffPtr(v *Value) bool {
27781 v_0 := v.Args[0]
27782 b := v.Block
27783 typ := &b.Func.Config.Types
27784 // match: (OffPtr [off] ptr)
27785 // cond: is32Bit(off)
27786 // result: (ADDQconst [int32(off)] ptr)
27787 for {
27788 off := auxIntToInt64(v.AuxInt)
27789 ptr := v_0
27790 if !(is32Bit(off)) {
27791 break
27792 }
27793 v.reset(OpAMD64ADDQconst)
27794 v.AuxInt = int32ToAuxInt(int32(off))
27795 v.AddArg(ptr)
27796 return true
27797 }
27798 // match: (OffPtr [off] ptr)
27799 // result: (ADDQ (MOVQconst [off]) ptr)
27800 for {
27801 off := auxIntToInt64(v.AuxInt)
27802 ptr := v_0
27803 v.reset(OpAMD64ADDQ)
27804 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27805 v0.AuxInt = int64ToAuxInt(off)
27806 v.AddArg2(v0, ptr)
27807 return true
27808 }
27809 }
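// boundsABI maps the bounds-failure kind onto one of three lowered panic
// calls; the A/B/C variants presumably differ only in which registers carry
// the index and length arguments, letting the register allocator avoid
// extra moves at the panic site.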
27810 func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
27811 v_2 := v.Args[2]
27812 v_1 := v.Args[1]
27813 v_0 := v.Args[0]
27814 // match: (PanicBounds [kind] x y mem)
27815 // cond: boundsABI(kind) == 0
27816 // result: (LoweredPanicBoundsA [kind] x y mem)
27817 for {
27818 kind := auxIntToInt64(v.AuxInt)
27819 x := v_0
27820 y := v_1
27821 mem := v_2
27822 if !(boundsABI(kind) == 0) {
27823 break
27824 }
27825 v.reset(OpAMD64LoweredPanicBoundsA)
27826 v.AuxInt = int64ToAuxInt(kind)
27827 v.AddArg3(x, y, mem)
27828 return true
27829 }
27830 // match: (PanicBounds [kind] x y mem)
27831 // cond: boundsABI(kind) == 1
27832 // result: (LoweredPanicBoundsB [kind] x y mem)
27833 for {
27834 kind := auxIntToInt64(v.AuxInt)
27835 x := v_0
27836 y := v_1
27837 mem := v_2
27838 if !(boundsABI(kind) == 1) {
27839 break
27840 }
27841 v.reset(OpAMD64LoweredPanicBoundsB)
27842 v.AuxInt = int64ToAuxInt(kind)
27843 v.AddArg3(x, y, mem)
27844 return true
27845 }
27846 // match: (PanicBounds [kind] x y mem)
27847 // cond: boundsABI(kind) == 2
27848 // result: (LoweredPanicBoundsC [kind] x y mem)
27849 for {
27850 kind := auxIntToInt64(v.AuxInt)
27851 x := v_0
27852 y := v_1
27853 mem := v_2
27854 if !(boundsABI(kind) == 2) {
27855 break
27856 }
27857 v.reset(OpAMD64LoweredPanicBoundsC)
27858 v.AuxInt = int64ToAuxInt(kind)
27859 v.AddArg3(x, y, mem)
27860 return true
27861 }
27862 return false
27863 }
27864 func rewriteValueAMD64_OpPopCount16(v *Value) bool {
27865 v_0 := v.Args[0]
27866 b := v.Block
27867 typ := &b.Func.Config.Types
27868 // match: (PopCount16 x)
27869 // result: (POPCNTL (MOVWQZX <typ.UInt32> x))
27870 for {
27871 x := v_0
27872 v.reset(OpAMD64POPCNTL)
27873 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
27874 v0.AddArg(x)
27875 v.AddArg(v0)
27876 return true
27877 }
27878 }
27879 func rewriteValueAMD64_OpPopCount8(v *Value) bool {
27880 v_0 := v.Args[0]
27881 b := v.Block
27882 typ := &b.Func.Config.Types
27883 // match: (PopCount8 x)
27884 // result: (POPCNTL (MOVBQZX <typ.UInt32> x))
27885 for {
27886 x := v_0
27887 v.reset(OpAMD64POPCNTL)
27888 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
27889 v0.AddArg(x)
27890 v.AddArg(v0)
27891 return true
27892 }
27893 }
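// ROUNDSD selects its rounding mode through the AuxInt immediate: 0 rounds to
// nearest even (RoundToEven here), 1 is floor, 2 is ceil, and 3 truncates
// toward zero (see OpTrunc near the end of this section).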
27894 func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
27895 v_0 := v.Args[0]
27896 // match: (RoundToEven x)
27897 // result: (ROUNDSD [0] x)
27898 for {
27899 x := v_0
27900 v.reset(OpAMD64ROUNDSD)
27901 v.AuxInt = int8ToAuxInt(0)
27902 v.AddArg(x)
27903 return true
27904 }
27905 }
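// The Rsh* lowerings below all share one scheme. Go defines a shift by a
// count >= the operand width to produce 0 (unsigned) or sign fill (signed),
// but the x86 shift instructions mask the count. When shiftIsBounded cannot
// prove the count is small, the unsigned rules AND the shifted value with
// SBB*carrymask(CMP*const y [width]): the compare sets carry exactly when
// y < width, and subtract-with-borrow turns that into an all-ones or all-zero
// mask. The signed rules instead OR the count with NOT(mask), forcing an
// out-of-range count to a large value the SAR instruction clamps, which
// yields the required sign-fill result.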
27906 func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
27907 v_1 := v.Args[1]
27908 v_0 := v.Args[0]
27909 b := v.Block
27910 // match: (Rsh16Ux16 <t> x y)
27911 // cond: !shiftIsBounded(v)
27912 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
27913 for {
27914 t := v.Type
27915 x := v_0
27916 y := v_1
27917 if !(!shiftIsBounded(v)) {
27918 break
27919 }
27920 v.reset(OpAMD64ANDL)
27921 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27922 v0.AddArg2(x, y)
27923 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27924 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
27925 v2.AuxInt = int16ToAuxInt(16)
27926 v2.AddArg(y)
27927 v1.AddArg(v2)
27928 v.AddArg2(v0, v1)
27929 return true
27930 }
27931 // match: (Rsh16Ux16 x y)
27932 // cond: shiftIsBounded(v)
27933 // result: (SHRW x y)
27934 for {
27935 x := v_0
27936 y := v_1
27937 if !(shiftIsBounded(v)) {
27938 break
27939 }
27940 v.reset(OpAMD64SHRW)
27941 v.AddArg2(x, y)
27942 return true
27943 }
27944 return false
27945 }
27946 func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
27947 v_1 := v.Args[1]
27948 v_0 := v.Args[0]
27949 b := v.Block
27950 // match: (Rsh16Ux32 <t> x y)
27951 // cond: !shiftIsBounded(v)
27952 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
27953 for {
27954 t := v.Type
27955 x := v_0
27956 y := v_1
27957 if !(!shiftIsBounded(v)) {
27958 break
27959 }
27960 v.reset(OpAMD64ANDL)
27961 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27962 v0.AddArg2(x, y)
27963 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27964 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
27965 v2.AuxInt = int32ToAuxInt(16)
27966 v2.AddArg(y)
27967 v1.AddArg(v2)
27968 v.AddArg2(v0, v1)
27969 return true
27970 }
27971 // match: (Rsh16Ux32 x y)
27972 // cond: shiftIsBounded(v)
27973 // result: (SHRW x y)
27974 for {
27975 x := v_0
27976 y := v_1
27977 if !(shiftIsBounded(v)) {
27978 break
27979 }
27980 v.reset(OpAMD64SHRW)
27981 v.AddArg2(x, y)
27982 return true
27983 }
27984 return false
27985 }
27986 func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
27987 v_1 := v.Args[1]
27988 v_0 := v.Args[0]
27989 b := v.Block
27990 // match: (Rsh16Ux64 <t> x y)
27991 // cond: !shiftIsBounded(v)
27992 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
27993 for {
27994 t := v.Type
27995 x := v_0
27996 y := v_1
27997 if !(!shiftIsBounded(v)) {
27998 break
27999 }
28000 v.reset(OpAMD64ANDL)
28001 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28002 v0.AddArg2(x, y)
28003 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28004 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28005 v2.AuxInt = int32ToAuxInt(16)
28006 v2.AddArg(y)
28007 v1.AddArg(v2)
28008 v.AddArg2(v0, v1)
28009 return true
28010 }
28011 // match: (Rsh16Ux64 x y)
28012 // cond: shiftIsBounded(v)
28013 // result: (SHRW x y)
28014 for {
28015 x := v_0
28016 y := v_1
28017 if !(shiftIsBounded(v)) {
28018 break
28019 }
28020 v.reset(OpAMD64SHRW)
28021 v.AddArg2(x, y)
28022 return true
28023 }
28024 return false
28025 }
28026 func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
28027 v_1 := v.Args[1]
28028 v_0 := v.Args[0]
28029 b := v.Block
28030 // match: (Rsh16Ux8 <t> x y)
28031 // cond: !shiftIsBounded(v)
28032 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
28033 for {
28034 t := v.Type
28035 x := v_0
28036 y := v_1
28037 if !(!shiftIsBounded(v)) {
28038 break
28039 }
28040 v.reset(OpAMD64ANDL)
28041 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28042 v0.AddArg2(x, y)
28043 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28044 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28045 v2.AuxInt = int8ToAuxInt(16)
28046 v2.AddArg(y)
28047 v1.AddArg(v2)
28048 v.AddArg2(v0, v1)
28049 return true
28050 }
28051 // match: (Rsh16Ux8 x y)
28052 // cond: shiftIsBounded(v)
28053 // result: (SHRW x y)
28054 for {
28055 x := v_0
28056 y := v_1
28057 if !(shiftIsBounded(v)) {
28058 break
28059 }
28060 v.reset(OpAMD64SHRW)
28061 v.AddArg2(x, y)
28062 return true
28063 }
28064 return false
28065 }
28066 func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
28067 v_1 := v.Args[1]
28068 v_0 := v.Args[0]
28069 b := v.Block
28070 // match: (Rsh16x16 <t> x y)
28071 // cond: !shiftIsBounded(v)
28072 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
28073 for {
28074 t := v.Type
28075 x := v_0
28076 y := v_1
28077 if !(!shiftIsBounded(v)) {
28078 break
28079 }
28080 v.reset(OpAMD64SARW)
28081 v.Type = t
28082 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28083 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28084 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28085 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28086 v3.AuxInt = int16ToAuxInt(16)
28087 v3.AddArg(y)
28088 v2.AddArg(v3)
28089 v1.AddArg(v2)
28090 v0.AddArg2(y, v1)
28091 v.AddArg2(x, v0)
28092 return true
28093 }
28094 // match: (Rsh16x16 x y)
28095 // cond: shiftIsBounded(v)
28096 // result: (SARW x y)
28097 for {
28098 x := v_0
28099 y := v_1
28100 if !(shiftIsBounded(v)) {
28101 break
28102 }
28103 v.reset(OpAMD64SARW)
28104 v.AddArg2(x, y)
28105 return true
28106 }
28107 return false
28108 }
28109 func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
28110 v_1 := v.Args[1]
28111 v_0 := v.Args[0]
28112 b := v.Block
28113 // match: (Rsh16x32 <t> x y)
28114 // cond: !shiftIsBounded(v)
28115 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
28116 for {
28117 t := v.Type
28118 x := v_0
28119 y := v_1
28120 if !(!shiftIsBounded(v)) {
28121 break
28122 }
28123 v.reset(OpAMD64SARW)
28124 v.Type = t
28125 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28126 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28127 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28128 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28129 v3.AuxInt = int32ToAuxInt(16)
28130 v3.AddArg(y)
28131 v2.AddArg(v3)
28132 v1.AddArg(v2)
28133 v0.AddArg2(y, v1)
28134 v.AddArg2(x, v0)
28135 return true
28136 }
28137 // match: (Rsh16x32 x y)
28138 // cond: shiftIsBounded(v)
28139 // result: (SARW x y)
28140 for {
28141 x := v_0
28142 y := v_1
28143 if !(shiftIsBounded(v)) {
28144 break
28145 }
28146 v.reset(OpAMD64SARW)
28147 v.AddArg2(x, y)
28148 return true
28149 }
28150 return false
28151 }
28152 func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
28153 v_1 := v.Args[1]
28154 v_0 := v.Args[0]
28155 b := v.Block
28156 // match: (Rsh16x64 <t> x y)
28157 // cond: !shiftIsBounded(v)
28158 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
28159 for {
28160 t := v.Type
28161 x := v_0
28162 y := v_1
28163 if !(!shiftIsBounded(v)) {
28164 break
28165 }
28166 v.reset(OpAMD64SARW)
28167 v.Type = t
28168 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28169 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28170 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28171 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28172 v3.AuxInt = int32ToAuxInt(16)
28173 v3.AddArg(y)
28174 v2.AddArg(v3)
28175 v1.AddArg(v2)
28176 v0.AddArg2(y, v1)
28177 v.AddArg2(x, v0)
28178 return true
28179 }
28180 // match: (Rsh16x64 x y)
28181 // cond: shiftIsBounded(v)
28182 // result: (SARW x y)
28183 for {
28184 x := v_0
28185 y := v_1
28186 if !(shiftIsBounded(v)) {
28187 break
28188 }
28189 v.reset(OpAMD64SARW)
28190 v.AddArg2(x, y)
28191 return true
28192 }
28193 return false
28194 }
28195 func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
28196 v_1 := v.Args[1]
28197 v_0 := v.Args[0]
28198 b := v.Block
28199 // match: (Rsh16x8 <t> x y)
28200 // cond: !shiftIsBounded(v)
28201 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
28202 for {
28203 t := v.Type
28204 x := v_0
28205 y := v_1
28206 if !(!shiftIsBounded(v)) {
28207 break
28208 }
28209 v.reset(OpAMD64SARW)
28210 v.Type = t
28211 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28212 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28213 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28214 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28215 v3.AuxInt = int8ToAuxInt(16)
28216 v3.AddArg(y)
28217 v2.AddArg(v3)
28218 v1.AddArg(v2)
28219 v0.AddArg2(y, v1)
28220 v.AddArg2(x, v0)
28221 return true
28222 }
28223 // match: (Rsh16x8 x y)
28224 // cond: shiftIsBounded(v)
28225 // result: (SARW x y)
28226 for {
28227 x := v_0
28228 y := v_1
28229 if !(shiftIsBounded(v)) {
28230 break
28231 }
28232 v.reset(OpAMD64SARW)
28233 v.AddArg2(x, y)
28234 return true
28235 }
28236 return false
28237 }
28238 func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
28239 v_1 := v.Args[1]
28240 v_0 := v.Args[0]
28241 b := v.Block
28242 // match: (Rsh32Ux16 <t> x y)
28243 // cond: !shiftIsBounded(v)
28244 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
28245 for {
28246 t := v.Type
28247 x := v_0
28248 y := v_1
28249 if !(!shiftIsBounded(v)) {
28250 break
28251 }
28252 v.reset(OpAMD64ANDL)
28253 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28254 v0.AddArg2(x, y)
28255 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28256 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28257 v2.AuxInt = int16ToAuxInt(32)
28258 v2.AddArg(y)
28259 v1.AddArg(v2)
28260 v.AddArg2(v0, v1)
28261 return true
28262 }
28263 // match: (Rsh32Ux16 x y)
28264 // cond: shiftIsBounded(v)
28265 // result: (SHRL x y)
28266 for {
28267 x := v_0
28268 y := v_1
28269 if !(shiftIsBounded(v)) {
28270 break
28271 }
28272 v.reset(OpAMD64SHRL)
28273 v.AddArg2(x, y)
28274 return true
28275 }
28276 return false
28277 }
28278 func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
28279 v_1 := v.Args[1]
28280 v_0 := v.Args[0]
28281 b := v.Block
28282 // match: (Rsh32Ux32 <t> x y)
28283 // cond: !shiftIsBounded(v)
28284 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
28285 for {
28286 t := v.Type
28287 x := v_0
28288 y := v_1
28289 if !(!shiftIsBounded(v)) {
28290 break
28291 }
28292 v.reset(OpAMD64ANDL)
28293 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28294 v0.AddArg2(x, y)
28295 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28296 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28297 v2.AuxInt = int32ToAuxInt(32)
28298 v2.AddArg(y)
28299 v1.AddArg(v2)
28300 v.AddArg2(v0, v1)
28301 return true
28302 }
28303 // match: (Rsh32Ux32 x y)
28304 // cond: shiftIsBounded(v)
28305 // result: (SHRL x y)
28306 for {
28307 x := v_0
28308 y := v_1
28309 if !(shiftIsBounded(v)) {
28310 break
28311 }
28312 v.reset(OpAMD64SHRL)
28313 v.AddArg2(x, y)
28314 return true
28315 }
28316 return false
28317 }
28318 func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
28319 v_1 := v.Args[1]
28320 v_0 := v.Args[0]
28321 b := v.Block
28322 // match: (Rsh32Ux64 <t> x y)
28323 // cond: !shiftIsBounded(v)
28324 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
28325 for {
28326 t := v.Type
28327 x := v_0
28328 y := v_1
28329 if !(!shiftIsBounded(v)) {
28330 break
28331 }
28332 v.reset(OpAMD64ANDL)
28333 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28334 v0.AddArg2(x, y)
28335 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28336 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28337 v2.AuxInt = int32ToAuxInt(32)
28338 v2.AddArg(y)
28339 v1.AddArg(v2)
28340 v.AddArg2(v0, v1)
28341 return true
28342 }
28343 // match: (Rsh32Ux64 x y)
28344 // cond: shiftIsBounded(v)
28345 // result: (SHRL x y)
28346 for {
28347 x := v_0
28348 y := v_1
28349 if !(shiftIsBounded(v)) {
28350 break
28351 }
28352 v.reset(OpAMD64SHRL)
28353 v.AddArg2(x, y)
28354 return true
28355 }
28356 return false
28357 }
28358 func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
28359 v_1 := v.Args[1]
28360 v_0 := v.Args[0]
28361 b := v.Block
28362 // match: (Rsh32Ux8 <t> x y)
28363 // cond: !shiftIsBounded(v)
28364 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
28365 for {
28366 t := v.Type
28367 x := v_0
28368 y := v_1
28369 if !(!shiftIsBounded(v)) {
28370 break
28371 }
28372 v.reset(OpAMD64ANDL)
28373 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28374 v0.AddArg2(x, y)
28375 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28376 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28377 v2.AuxInt = int8ToAuxInt(32)
28378 v2.AddArg(y)
28379 v1.AddArg(v2)
28380 v.AddArg2(v0, v1)
28381 return true
28382 }
28383 // match: (Rsh32Ux8 x y)
28384 // cond: shiftIsBounded(v)
28385 // result: (SHRL x y)
28386 for {
28387 x := v_0
28388 y := v_1
28389 if !(shiftIsBounded(v)) {
28390 break
28391 }
28392 v.reset(OpAMD64SHRL)
28393 v.AddArg2(x, y)
28394 return true
28395 }
28396 return false
28397 }
28398 func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
28399 v_1 := v.Args[1]
28400 v_0 := v.Args[0]
28401 b := v.Block
28402 // match: (Rsh32x16 <t> x y)
28403 // cond: !shiftIsBounded(v)
28404 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
28405 for {
28406 t := v.Type
28407 x := v_0
28408 y := v_1
28409 if !(!shiftIsBounded(v)) {
28410 break
28411 }
28412 v.reset(OpAMD64SARL)
28413 v.Type = t
28414 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28415 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28416 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28417 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28418 v3.AuxInt = int16ToAuxInt(32)
28419 v3.AddArg(y)
28420 v2.AddArg(v3)
28421 v1.AddArg(v2)
28422 v0.AddArg2(y, v1)
28423 v.AddArg2(x, v0)
28424 return true
28425 }
28426 // match: (Rsh32x16 x y)
28427 // cond: shiftIsBounded(v)
28428 // result: (SARL x y)
28429 for {
28430 x := v_0
28431 y := v_1
28432 if !(shiftIsBounded(v)) {
28433 break
28434 }
28435 v.reset(OpAMD64SARL)
28436 v.AddArg2(x, y)
28437 return true
28438 }
28439 return false
28440 }
28441 func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
28442 v_1 := v.Args[1]
28443 v_0 := v.Args[0]
28444 b := v.Block
28445 // match: (Rsh32x32 <t> x y)
28446 // cond: !shiftIsBounded(v)
28447 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
28448 for {
28449 t := v.Type
28450 x := v_0
28451 y := v_1
28452 if !(!shiftIsBounded(v)) {
28453 break
28454 }
28455 v.reset(OpAMD64SARL)
28456 v.Type = t
28457 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28458 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28459 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28460 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28461 v3.AuxInt = int32ToAuxInt(32)
28462 v3.AddArg(y)
28463 v2.AddArg(v3)
28464 v1.AddArg(v2)
28465 v0.AddArg2(y, v1)
28466 v.AddArg2(x, v0)
28467 return true
28468 }
28469 // match: (Rsh32x32 x y)
28470 // cond: shiftIsBounded(v)
28471 // result: (SARL x y)
28472 for {
28473 x := v_0
28474 y := v_1
28475 if !(shiftIsBounded(v)) {
28476 break
28477 }
28478 v.reset(OpAMD64SARL)
28479 v.AddArg2(x, y)
28480 return true
28481 }
28482 return false
28483 }
28484 func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
28485 v_1 := v.Args[1]
28486 v_0 := v.Args[0]
28487 b := v.Block
28488 // match: (Rsh32x64 <t> x y)
28489 // cond: !shiftIsBounded(v)
28490 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
28491 for {
28492 t := v.Type
28493 x := v_0
28494 y := v_1
28495 if !(!shiftIsBounded(v)) {
28496 break
28497 }
28498 v.reset(OpAMD64SARL)
28499 v.Type = t
28500 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28501 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28502 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28503 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28504 v3.AuxInt = int32ToAuxInt(32)
28505 v3.AddArg(y)
28506 v2.AddArg(v3)
28507 v1.AddArg(v2)
28508 v0.AddArg2(y, v1)
28509 v.AddArg2(x, v0)
28510 return true
28511 }
28512 // match: (Rsh32x64 x y)
28513 // cond: shiftIsBounded(v)
28514 // result: (SARL x y)
28515 for {
28516 x := v_0
28517 y := v_1
28518 if !(shiftIsBounded(v)) {
28519 break
28520 }
28521 v.reset(OpAMD64SARL)
28522 v.AddArg2(x, y)
28523 return true
28524 }
28525 return false
28526 }
28527 func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
28528 v_1 := v.Args[1]
28529 v_0 := v.Args[0]
28530 b := v.Block
28531 // match: (Rsh32x8 <t> x y)
28532 // cond: !shiftIsBounded(v)
28533 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
28534 for {
28535 t := v.Type
28536 x := v_0
28537 y := v_1
28538 if !(!shiftIsBounded(v)) {
28539 break
28540 }
28541 v.reset(OpAMD64SARL)
28542 v.Type = t
28543 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28544 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28545 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28546 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28547 v3.AuxInt = int8ToAuxInt(32)
28548 v3.AddArg(y)
28549 v2.AddArg(v3)
28550 v1.AddArg(v2)
28551 v0.AddArg2(y, v1)
28552 v.AddArg2(x, v0)
28553 return true
28554 }
28555 // match: (Rsh32x8 x y)
28556 // cond: shiftIsBounded(v)
28557 // result: (SARL x y)
28558 for {
28559 x := v_0
28560 y := v_1
28561 if !(shiftIsBounded(v)) {
28562 break
28563 }
28564 v.reset(OpAMD64SARL)
28565 v.AddArg2(x, y)
28566 return true
28567 }
28568 return false
28569 }
28570 func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
28571 v_1 := v.Args[1]
28572 v_0 := v.Args[0]
28573 b := v.Block
28574 // match: (Rsh64Ux16 <t> x y)
28575 // cond: !shiftIsBounded(v)
28576 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
28577 for {
28578 t := v.Type
28579 x := v_0
28580 y := v_1
28581 if !(!shiftIsBounded(v)) {
28582 break
28583 }
28584 v.reset(OpAMD64ANDQ)
28585 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28586 v0.AddArg2(x, y)
28587 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28588 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28589 v2.AuxInt = int16ToAuxInt(64)
28590 v2.AddArg(y)
28591 v1.AddArg(v2)
28592 v.AddArg2(v0, v1)
28593 return true
28594 }
28595 // match: (Rsh64Ux16 x y)
28596 // cond: shiftIsBounded(v)
28597 // result: (SHRQ x y)
28598 for {
28599 x := v_0
28600 y := v_1
28601 if !(shiftIsBounded(v)) {
28602 break
28603 }
28604 v.reset(OpAMD64SHRQ)
28605 v.AddArg2(x, y)
28606 return true
28607 }
28608 return false
28609 }
28610 func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
28611 v_1 := v.Args[1]
28612 v_0 := v.Args[0]
28613 b := v.Block
28614 // match: (Rsh64Ux32 <t> x y)
28615 // cond: !shiftIsBounded(v)
28616 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
28617 for {
28618 t := v.Type
28619 x := v_0
28620 y := v_1
28621 if !(!shiftIsBounded(v)) {
28622 break
28623 }
28624 v.reset(OpAMD64ANDQ)
28625 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28626 v0.AddArg2(x, y)
28627 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28628 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28629 v2.AuxInt = int32ToAuxInt(64)
28630 v2.AddArg(y)
28631 v1.AddArg(v2)
28632 v.AddArg2(v0, v1)
28633 return true
28634 }
28635 // match: (Rsh64Ux32 x y)
28636 // cond: shiftIsBounded(v)
28637 // result: (SHRQ x y)
28638 for {
28639 x := v_0
28640 y := v_1
28641 if !(shiftIsBounded(v)) {
28642 break
28643 }
28644 v.reset(OpAMD64SHRQ)
28645 v.AddArg2(x, y)
28646 return true
28647 }
28648 return false
28649 }
28650 func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
28651 v_1 := v.Args[1]
28652 v_0 := v.Args[0]
28653 b := v.Block
28654 // match: (Rsh64Ux64 <t> x y)
28655 // cond: !shiftIsBounded(v)
28656 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
28657 for {
28658 t := v.Type
28659 x := v_0
28660 y := v_1
28661 if !(!shiftIsBounded(v)) {
28662 break
28663 }
28664 v.reset(OpAMD64ANDQ)
28665 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28666 v0.AddArg2(x, y)
28667 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28668 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28669 v2.AuxInt = int32ToAuxInt(64)
28670 v2.AddArg(y)
28671 v1.AddArg(v2)
28672 v.AddArg2(v0, v1)
28673 return true
28674 }
28675 // match: (Rsh64Ux64 x y)
28676 // cond: shiftIsBounded(v)
28677 // result: (SHRQ x y)
28678 for {
28679 x := v_0
28680 y := v_1
28681 if !(shiftIsBounded(v)) {
28682 break
28683 }
28684 v.reset(OpAMD64SHRQ)
28685 v.AddArg2(x, y)
28686 return true
28687 }
28688 return false
28689 }
28690 func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
28691 v_1 := v.Args[1]
28692 v_0 := v.Args[0]
28693 b := v.Block
28694 // match: (Rsh64Ux8 <t> x y)
28695 // cond: !shiftIsBounded(v)
28696 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
28697 for {
28698 t := v.Type
28699 x := v_0
28700 y := v_1
28701 if !(!shiftIsBounded(v)) {
28702 break
28703 }
28704 v.reset(OpAMD64ANDQ)
28705 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28706 v0.AddArg2(x, y)
28707 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28708 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28709 v2.AuxInt = int8ToAuxInt(64)
28710 v2.AddArg(y)
28711 v1.AddArg(v2)
28712 v.AddArg2(v0, v1)
28713 return true
28714 }
28715 // match: (Rsh64Ux8 x y)
28716 // cond: shiftIsBounded(v)
28717 // result: (SHRQ x y)
28718 for {
28719 x := v_0
28720 y := v_1
28721 if !(shiftIsBounded(v)) {
28722 break
28723 }
28724 v.reset(OpAMD64SHRQ)
28725 v.AddArg2(x, y)
28726 return true
28727 }
28728 return false
28729 }
28730 func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
28731 v_1 := v.Args[1]
28732 v_0 := v.Args[0]
28733 b := v.Block
28734 // match: (Rsh64x16 <t> x y)
28735 // cond: !shiftIsBounded(v)
28736 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
28737 for {
28738 t := v.Type
28739 x := v_0
28740 y := v_1
28741 if !(!shiftIsBounded(v)) {
28742 break
28743 }
28744 v.reset(OpAMD64SARQ)
28745 v.Type = t
28746 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28747 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28748 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28749 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28750 v3.AuxInt = int16ToAuxInt(64)
28751 v3.AddArg(y)
28752 v2.AddArg(v3)
28753 v1.AddArg(v2)
28754 v0.AddArg2(y, v1)
28755 v.AddArg2(x, v0)
28756 return true
28757 }
28758 // match: (Rsh64x16 x y)
28759 // cond: shiftIsBounded(v)
28760 // result: (SARQ x y)
28761 for {
28762 x := v_0
28763 y := v_1
28764 if !(shiftIsBounded(v)) {
28765 break
28766 }
28767 v.reset(OpAMD64SARQ)
28768 v.AddArg2(x, y)
28769 return true
28770 }
28771 return false
28772 }
28773 func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
28774 v_1 := v.Args[1]
28775 v_0 := v.Args[0]
28776 b := v.Block
28777 // match: (Rsh64x32 <t> x y)
28778 // cond: !shiftIsBounded(v)
28779 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
28780 for {
28781 t := v.Type
28782 x := v_0
28783 y := v_1
28784 if !(!shiftIsBounded(v)) {
28785 break
28786 }
28787 v.reset(OpAMD64SARQ)
28788 v.Type = t
28789 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28790 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28791 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28792 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28793 v3.AuxInt = int32ToAuxInt(64)
28794 v3.AddArg(y)
28795 v2.AddArg(v3)
28796 v1.AddArg(v2)
28797 v0.AddArg2(y, v1)
28798 v.AddArg2(x, v0)
28799 return true
28800 }
28801 // match: (Rsh64x32 x y)
28802 // cond: shiftIsBounded(v)
28803 // result: (SARQ x y)
28804 for {
28805 x := v_0
28806 y := v_1
28807 if !(shiftIsBounded(v)) {
28808 break
28809 }
28810 v.reset(OpAMD64SARQ)
28811 v.AddArg2(x, y)
28812 return true
28813 }
28814 return false
28815 }
28816 func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
28817 v_1 := v.Args[1]
28818 v_0 := v.Args[0]
28819 b := v.Block
28820 // match: (Rsh64x64 <t> x y)
28821 // cond: !shiftIsBounded(v)
28822 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
28823 for {
28824 t := v.Type
28825 x := v_0
28826 y := v_1
28827 if !(!shiftIsBounded(v)) {
28828 break
28829 }
28830 v.reset(OpAMD64SARQ)
28831 v.Type = t
28832 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28833 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28834 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28835 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28836 v3.AuxInt = int32ToAuxInt(64)
28837 v3.AddArg(y)
28838 v2.AddArg(v3)
28839 v1.AddArg(v2)
28840 v0.AddArg2(y, v1)
28841 v.AddArg2(x, v0)
28842 return true
28843 }
28844 // match: (Rsh64x64 x y)
28845 // cond: shiftIsBounded(v)
28846 // result: (SARQ x y)
28847 for {
28848 x := v_0
28849 y := v_1
28850 if !(shiftIsBounded(v)) {
28851 break
28852 }
28853 v.reset(OpAMD64SARQ)
28854 v.AddArg2(x, y)
28855 return true
28856 }
28857 return false
28858 }
28859 func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
28860 v_1 := v.Args[1]
28861 v_0 := v.Args[0]
28862 b := v.Block
28863 // match: (Rsh64x8 <t> x y)
28864 // cond: !shiftIsBounded(v)
28865 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
28866 for {
28867 t := v.Type
28868 x := v_0
28869 y := v_1
28870 if !(!shiftIsBounded(v)) {
28871 break
28872 }
28873 v.reset(OpAMD64SARQ)
28874 v.Type = t
28875 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28876 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28877 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28878 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28879 v3.AuxInt = int8ToAuxInt(64)
28880 v3.AddArg(y)
28881 v2.AddArg(v3)
28882 v1.AddArg(v2)
28883 v0.AddArg2(y, v1)
28884 v.AddArg2(x, v0)
28885 return true
28886 }
28887 // match: (Rsh64x8 x y)
28888 // cond: shiftIsBounded(v)
28889 // result: (SARQ x y)
28890 for {
28891 x := v_0
28892 y := v_1
28893 if !(shiftIsBounded(v)) {
28894 break
28895 }
28896 v.reset(OpAMD64SARQ)
28897 v.AddArg2(x, y)
28898 return true
28899 }
28900 return false
28901 }
28902 func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
28903 v_1 := v.Args[1]
28904 v_0 := v.Args[0]
28905 b := v.Block
28906 // match: (Rsh8Ux16 <t> x y)
28907 // cond: !shiftIsBounded(v)
28908 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
28909 for {
28910 t := v.Type
28911 x := v_0
28912 y := v_1
28913 if !(!shiftIsBounded(v)) {
28914 break
28915 }
28916 v.reset(OpAMD64ANDL)
28917 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28918 v0.AddArg2(x, y)
28919 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28920 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28921 v2.AuxInt = int16ToAuxInt(8)
28922 v2.AddArg(y)
28923 v1.AddArg(v2)
28924 v.AddArg2(v0, v1)
28925 return true
28926 }
28927 // match: (Rsh8Ux16 x y)
28928 // cond: shiftIsBounded(v)
28929 // result: (SHRB x y)
28930 for {
28931 x := v_0
28932 y := v_1
28933 if !(shiftIsBounded(v)) {
28934 break
28935 }
28936 v.reset(OpAMD64SHRB)
28937 v.AddArg2(x, y)
28938 return true
28939 }
28940 return false
28941 }
28942 func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
28943 v_1 := v.Args[1]
28944 v_0 := v.Args[0]
28945 b := v.Block
28946 // match: (Rsh8Ux32 <t> x y)
28947 // cond: !shiftIsBounded(v)
28948 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
28949 for {
28950 t := v.Type
28951 x := v_0
28952 y := v_1
28953 if !(!shiftIsBounded(v)) {
28954 break
28955 }
28956 v.reset(OpAMD64ANDL)
28957 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28958 v0.AddArg2(x, y)
28959 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28960 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28961 v2.AuxInt = int32ToAuxInt(8)
28962 v2.AddArg(y)
28963 v1.AddArg(v2)
28964 v.AddArg2(v0, v1)
28965 return true
28966 }
28967 // match: (Rsh8Ux32 x y)
28968 // cond: shiftIsBounded(v)
28969 // result: (SHRB x y)
28970 for {
28971 x := v_0
28972 y := v_1
28973 if !(shiftIsBounded(v)) {
28974 break
28975 }
28976 v.reset(OpAMD64SHRB)
28977 v.AddArg2(x, y)
28978 return true
28979 }
28980 return false
28981 }
28982 func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
28983 v_1 := v.Args[1]
28984 v_0 := v.Args[0]
28985 b := v.Block
28986 // match: (Rsh8Ux64 <t> x y)
28987 // cond: !shiftIsBounded(v)
28988 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
28989 for {
28990 t := v.Type
28991 x := v_0
28992 y := v_1
28993 if !(!shiftIsBounded(v)) {
28994 break
28995 }
28996 v.reset(OpAMD64ANDL)
28997 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28998 v0.AddArg2(x, y)
28999 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29000 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29001 v2.AuxInt = int32ToAuxInt(8)
29002 v2.AddArg(y)
29003 v1.AddArg(v2)
29004 v.AddArg2(v0, v1)
29005 return true
29006 }
29007 // match: (Rsh8Ux64 x y)
29008 // cond: shiftIsBounded(v)
29009 // result: (SHRB x y)
29010 for {
29011 x := v_0
29012 y := v_1
29013 if !(shiftIsBounded(v)) {
29014 break
29015 }
29016 v.reset(OpAMD64SHRB)
29017 v.AddArg2(x, y)
29018 return true
29019 }
29020 return false
29021 }
29022 func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
29023 v_1 := v.Args[1]
29024 v_0 := v.Args[0]
29025 b := v.Block
29026 // match: (Rsh8Ux8 <t> x y)
29027 // cond: !shiftIsBounded(v)
29028 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
29029 for {
29030 t := v.Type
29031 x := v_0
29032 y := v_1
29033 if !(!shiftIsBounded(v)) {
29034 break
29035 }
29036 v.reset(OpAMD64ANDL)
29037 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29038 v0.AddArg2(x, y)
29039 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29040 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29041 v2.AuxInt = int8ToAuxInt(8)
29042 v2.AddArg(y)
29043 v1.AddArg(v2)
29044 v.AddArg2(v0, v1)
29045 return true
29046 }
29047 // match: (Rsh8Ux8 x y)
29048 // cond: shiftIsBounded(v)
29049 // result: (SHRB x y)
29050 for {
29051 x := v_0
29052 y := v_1
29053 if !(shiftIsBounded(v)) {
29054 break
29055 }
29056 v.reset(OpAMD64SHRB)
29057 v.AddArg2(x, y)
29058 return true
29059 }
29060 return false
29061 }
29062 func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
29063 v_1 := v.Args[1]
29064 v_0 := v.Args[0]
29065 b := v.Block
29066 // match: (Rsh8x16 <t> x y)
29067 // cond: !shiftIsBounded(v)
29068 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
29069 for {
29070 t := v.Type
29071 x := v_0
29072 y := v_1
29073 if !(!shiftIsBounded(v)) {
29074 break
29075 }
29076 v.reset(OpAMD64SARB)
29077 v.Type = t
29078 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29079 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29080 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29081 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
29082 v3.AuxInt = int16ToAuxInt(8)
29083 v3.AddArg(y)
29084 v2.AddArg(v3)
29085 v1.AddArg(v2)
29086 v0.AddArg2(y, v1)
29087 v.AddArg2(x, v0)
29088 return true
29089 }
29090 // match: (Rsh8x16 x y)
29091 // cond: shiftIsBounded(v)
29092 // result: (SARB x y)
29093 for {
29094 x := v_0
29095 y := v_1
29096 if !(shiftIsBounded(v)) {
29097 break
29098 }
29099 v.reset(OpAMD64SARB)
29100 v.AddArg2(x, y)
29101 return true
29102 }
29103 return false
29104 }
29105 func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
29106 v_1 := v.Args[1]
29107 v_0 := v.Args[0]
29108 b := v.Block
29109 // match: (Rsh8x32 <t> x y)
29110 // cond: !shiftIsBounded(v)
29111 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
29112 for {
29113 t := v.Type
29114 x := v_0
29115 y := v_1
29116 if !(!shiftIsBounded(v)) {
29117 break
29118 }
29119 v.reset(OpAMD64SARB)
29120 v.Type = t
29121 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29122 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29123 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29124 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
29125 v3.AuxInt = int32ToAuxInt(8)
29126 v3.AddArg(y)
29127 v2.AddArg(v3)
29128 v1.AddArg(v2)
29129 v0.AddArg2(y, v1)
29130 v.AddArg2(x, v0)
29131 return true
29132 }
29133 // match: (Rsh8x32 x y)
29134 // cond: shiftIsBounded(v)
29135 // result: (SARB x y)
29136 for {
29137 x := v_0
29138 y := v_1
29139 if !(shiftIsBounded(v)) {
29140 break
29141 }
29142 v.reset(OpAMD64SARB)
29143 v.AddArg2(x, y)
29144 return true
29145 }
29146 return false
29147 }
29148 func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
29149 v_1 := v.Args[1]
29150 v_0 := v.Args[0]
29151 b := v.Block
29152 // match: (Rsh8x64 <t> x y)
29153 // cond: !shiftIsBounded(v)
29154 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
29155 for {
29156 t := v.Type
29157 x := v_0
29158 y := v_1
29159 if !(!shiftIsBounded(v)) {
29160 break
29161 }
29162 v.reset(OpAMD64SARB)
29163 v.Type = t
29164 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
29165 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
29166 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
29167 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29168 v3.AuxInt = int32ToAuxInt(8)
29169 v3.AddArg(y)
29170 v2.AddArg(v3)
29171 v1.AddArg(v2)
29172 v0.AddArg2(y, v1)
29173 v.AddArg2(x, v0)
29174 return true
29175 }
29176 // match: (Rsh8x64 x y)
29177 // cond: shiftIsBounded(v)
29178 // result: (SARB x y)
29179 for {
29180 x := v_0
29181 y := v_1
29182 if !(shiftIsBounded(v)) {
29183 break
29184 }
29185 v.reset(OpAMD64SARB)
29186 v.AddArg2(x, y)
29187 return true
29188 }
29189 return false
29190 }
29191 func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
29192 v_1 := v.Args[1]
29193 v_0 := v.Args[0]
29194 b := v.Block
29195 // match: (Rsh8x8 <t> x y)
29196 // cond: !shiftIsBounded(v)
29197 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
29198 for {
29199 t := v.Type
29200 x := v_0
29201 y := v_1
29202 if !(!shiftIsBounded(v)) {
29203 break
29204 }
29205 v.reset(OpAMD64SARB)
29206 v.Type = t
29207 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29208 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29209 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29210 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29211 v3.AuxInt = int8ToAuxInt(8)
29212 v3.AddArg(y)
29213 v2.AddArg(v3)
29214 v1.AddArg(v2)
29215 v0.AddArg2(y, v1)
29216 v.AddArg2(x, v0)
29217 return true
29218 }
29219 // match: (Rsh8x8 x y)
29220 // cond: shiftIsBounded(v)
29221 // result: (SARB x y)
29222 for {
29223 x := v_0
29224 y := v_1
29225 if !(shiftIsBounded(v)) {
29226 break
29227 }
29228 v.reset(OpAMD64SARB)
29229 v.AddArg2(x, y)
29230 return true
29231 }
29232 return false
29233 }
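// Select0/Select1 project the two halves of tuple-producing generic ops onto
// machine tuples. For Add64carry/Sub64borrow the boolean carry input is first
// materialized into the CPU carry flag via NEGLflags (negating a nonzero
// value sets carry), fed to ADCQ/SBBQ, and the carry-out is recovered as
// 0 or 1 by NEGQ(SBBQcarrymask(flags)) in the Select1 rules.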
29234 func rewriteValueAMD64_OpSelect0(v *Value) bool {
29235 v_0 := v.Args[0]
29236 b := v.Block
29237 typ := &b.Func.Config.Types
29238 // match: (Select0 (Mul64uover x y))
29239 // result: (Select0 <typ.UInt64> (MULQU x y))
29240 for {
29241 if v_0.Op != OpMul64uover {
29242 break
29243 }
29244 y := v_0.Args[1]
29245 x := v_0.Args[0]
29246 v.reset(OpSelect0)
29247 v.Type = typ.UInt64
29248 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29249 v0.AddArg2(x, y)
29250 v.AddArg(v0)
29251 return true
29252 }
29253 // match: (Select0 (Mul32uover x y))
29254 // result: (Select0 <typ.UInt32> (MULLU x y))
29255 for {
29256 if v_0.Op != OpMul32uover {
29257 break
29258 }
29259 y := v_0.Args[1]
29260 x := v_0.Args[0]
29261 v.reset(OpSelect0)
29262 v.Type = typ.UInt32
29263 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29264 v0.AddArg2(x, y)
29265 v.AddArg(v0)
29266 return true
29267 }
29268 // match: (Select0 (Add64carry x y c))
29269 // result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
29270 for {
29271 if v_0.Op != OpAdd64carry {
29272 break
29273 }
29274 c := v_0.Args[2]
29275 x := v_0.Args[0]
29276 y := v_0.Args[1]
29277 v.reset(OpSelect0)
29278 v.Type = typ.UInt64
29279 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29280 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29281 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29282 v2.AddArg(c)
29283 v1.AddArg(v2)
29284 v0.AddArg3(x, y, v1)
29285 v.AddArg(v0)
29286 return true
29287 }
29288 // match: (Select0 (Sub64borrow x y c))
29289 // result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
29290 for {
29291 if v_0.Op != OpSub64borrow {
29292 break
29293 }
29294 c := v_0.Args[2]
29295 x := v_0.Args[0]
29296 y := v_0.Args[1]
29297 v.reset(OpSelect0)
29298 v.Type = typ.UInt64
29299 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29300 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29301 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29302 v2.AddArg(c)
29303 v1.AddArg(v2)
29304 v0.AddArg3(x, y, v1)
29305 v.AddArg(v0)
29306 return true
29307 }
29308 // match: (Select0 <t> (AddTupleFirst32 val tuple))
29309 // result: (ADDL val (Select0 <t> tuple))
29310 for {
29311 t := v.Type
29312 if v_0.Op != OpAMD64AddTupleFirst32 {
29313 break
29314 }
29315 tuple := v_0.Args[1]
29316 val := v_0.Args[0]
29317 v.reset(OpAMD64ADDL)
29318 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29319 v0.AddArg(tuple)
29320 v.AddArg2(val, v0)
29321 return true
29322 }
29323 // match: (Select0 <t> (AddTupleFirst64 val tuple))
29324 // result: (ADDQ val (Select0 <t> tuple))
29325 for {
29326 t := v.Type
29327 if v_0.Op != OpAMD64AddTupleFirst64 {
29328 break
29329 }
29330 tuple := v_0.Args[1]
29331 val := v_0.Args[0]
29332 v.reset(OpAMD64ADDQ)
29333 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29334 v0.AddArg(tuple)
29335 v.AddArg2(val, v0)
29336 return true
29337 }
29338 return false
29339 }
29340 func rewriteValueAMD64_OpSelect1(v *Value) bool {
29341 v_0 := v.Args[0]
29342 b := v.Block
29343 typ := &b.Func.Config.Types
29344 // match: (Select1 (Mul64uover x y))
29345 // result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
29346 for {
29347 if v_0.Op != OpMul64uover {
29348 break
29349 }
29350 y := v_0.Args[1]
29351 x := v_0.Args[0]
29352 v.reset(OpAMD64SETO)
29353 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29354 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29355 v1.AddArg2(x, y)
29356 v0.AddArg(v1)
29357 v.AddArg(v0)
29358 return true
29359 }
29360 // match: (Select1 (Mul32uover x y))
29361 // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
29362 for {
29363 if v_0.Op != OpMul32uover {
29364 break
29365 }
29366 y := v_0.Args[1]
29367 x := v_0.Args[0]
29368 v.reset(OpAMD64SETO)
29369 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29370 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29371 v1.AddArg2(x, y)
29372 v0.AddArg(v1)
29373 v.AddArg(v0)
29374 return true
29375 }
29376 // match: (Select1 (Add64carry x y c))
29377 // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
29378 for {
29379 if v_0.Op != OpAdd64carry {
29380 break
29381 }
29382 c := v_0.Args[2]
29383 x := v_0.Args[0]
29384 y := v_0.Args[1]
29385 v.reset(OpAMD64NEGQ)
29386 v.Type = typ.UInt64
29387 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29388 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29389 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29390 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29391 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29392 v4.AddArg(c)
29393 v3.AddArg(v4)
29394 v2.AddArg3(x, y, v3)
29395 v1.AddArg(v2)
29396 v0.AddArg(v1)
29397 v.AddArg(v0)
29398 return true
29399 }
29400 // match: (Select1 (Sub64borrow x y c))
29401 // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
29402 for {
29403 if v_0.Op != OpSub64borrow {
29404 break
29405 }
29406 c := v_0.Args[2]
29407 x := v_0.Args[0]
29408 y := v_0.Args[1]
29409 v.reset(OpAMD64NEGQ)
29410 v.Type = typ.UInt64
29411 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29412 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29413 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29414 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29415 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29416 v4.AddArg(c)
29417 v3.AddArg(v4)
29418 v2.AddArg3(x, y, v3)
29419 v1.AddArg(v2)
29420 v0.AddArg(v1)
29421 v.AddArg(v0)
29422 return true
29423 }
29424 // match: (Select1 (NEGLflags (MOVQconst [0])))
29425 // result: (FlagEQ)
29426 for {
29427 if v_0.Op != OpAMD64NEGLflags {
29428 break
29429 }
29430 v_0_0 := v_0.Args[0]
29431 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
29432 break
29433 }
29434 v.reset(OpAMD64FlagEQ)
29435 return true
29436 }
29437 // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
29438 // result: x
29439 for {
29440 if v_0.Op != OpAMD64NEGLflags {
29441 break
29442 }
29443 v_0_0 := v_0.Args[0]
29444 if v_0_0.Op != OpAMD64NEGQ {
29445 break
29446 }
29447 v_0_0_0 := v_0_0.Args[0]
29448 if v_0_0_0.Op != OpAMD64SBBQcarrymask {
29449 break
29450 }
29451 x := v_0_0_0.Args[0]
29452 v.copyOf(x)
29453 return true
29454 }
29455 // match: (Select1 (AddTupleFirst32 _ tuple))
29456 // result: (Select1 tuple)
29457 for {
29458 if v_0.Op != OpAMD64AddTupleFirst32 {
29459 break
29460 }
29461 tuple := v_0.Args[1]
29462 v.reset(OpSelect1)
29463 v.AddArg(tuple)
29464 return true
29465 }
29466 // match: (Select1 (AddTupleFirst64 _ tuple))
29467 // result: (Select1 tuple)
29468 for {
29469 if v_0.Op != OpAMD64AddTupleFirst64 {
29470 break
29471 }
29472 tuple := v_0.Args[1]
29473 v.reset(OpSelect1)
29474 v.AddArg(tuple)
29475 return true
29476 }
29477 return false
29478 }
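// The two SelectN rules inline small constant-size calls to runtime.memmove
// as a plain Move. The first pattern matches the size, src and dst being
// stored to the stack before the call; the second matches them being passed
// directly as call arguments. isInlinableMemmove decides whether a Move of
// that size is safe and profitable for this config.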
29479 func rewriteValueAMD64_OpSelectN(v *Value) bool {
29480 v_0 := v.Args[0]
29481 b := v.Block
29482 config := b.Func.Config
29483 // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst [sc] _ s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
29484 // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
29485 // result: (Move [sc.Val64()] dst src mem)
29486 for {
29487 if auxIntToInt64(v.AuxInt) != 0 {
29488 break
29489 }
29490 call := v_0
29491 if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
29492 break
29493 }
29494 sym := auxToCall(call.Aux)
29495 s1 := call.Args[0]
29496 if s1.Op != OpAMD64MOVQstoreconst {
29497 break
29498 }
29499 sc := auxIntToValAndOff(s1.AuxInt)
29500 _ = s1.Args[1]
29501 s2 := s1.Args[1]
29502 if s2.Op != OpAMD64MOVQstore {
29503 break
29504 }
29505 _ = s2.Args[2]
29506 src := s2.Args[1]
29507 s3 := s2.Args[2]
29508 if s3.Op != OpAMD64MOVQstore {
29509 break
29510 }
29511 mem := s3.Args[2]
29512 dst := s3.Args[1]
29513 if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
29514 break
29515 }
29516 v.reset(OpMove)
29517 v.AuxInt = int64ToAuxInt(sc.Val64())
29518 v.AddArg3(dst, src, mem)
29519 return true
29520 }
29521 // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
29522 // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
29523 // result: (Move [sz] dst src mem)
29524 for {
29525 if auxIntToInt64(v.AuxInt) != 0 {
29526 break
29527 }
29528 call := v_0
29529 if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
29530 break
29531 }
29532 sym := auxToCall(call.Aux)
29533 mem := call.Args[3]
29534 dst := call.Args[0]
29535 src := call.Args[1]
29536 call_2 := call.Args[2]
29537 if call_2.Op != OpAMD64MOVQconst {
29538 break
29539 }
29540 sz := auxIntToInt64(call_2.AuxInt)
29541 if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
29542 break
29543 }
29544 v.reset(OpMove)
29545 v.AuxInt = int64ToAuxInt(sz)
29546 v.AddArg3(dst, src, mem)
29547 return true
29548 }
29549 return false
29550 }
29551 func rewriteValueAMD64_OpSlicemask(v *Value) bool {
29552 v_0 := v.Args[0]
29553 b := v.Block
29554 // match: (Slicemask <t> x)
29555 // result: (SARQconst (NEGQ <t> x) [63])
29556 for {
29557 t := v.Type
29558 x := v_0
29559 v.reset(OpAMD64SARQconst)
29560 v.AuxInt = int8ToAuxInt(63)
29561 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
29562 v0.AddArg(x)
29563 v.AddArg(v0)
29564 return true
29565 }
29566 }
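// The Spectre hardening ops clamp an index to 0 with a conditional move
// whenever the bounds comparison says it is out of range, so a mispredicted
// branch cannot speculatively load through an attacker-chosen index.
// CMOVQCC zeroes the index when x >= y (plain indexing, which requires
// x < len), while CMOVQHI zeroes it only when x > y (slice expressions,
// where x == len is legal).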
29567 func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
29568 v_1 := v.Args[1]
29569 v_0 := v.Args[0]
29570 b := v.Block
29571 typ := &b.Func.Config.Types
29572 // match: (SpectreIndex <t> x y)
29573 // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
29574 for {
29575 x := v_0
29576 y := v_1
29577 v.reset(OpAMD64CMOVQCC)
29578 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29579 v0.AuxInt = int64ToAuxInt(0)
29580 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29581 v1.AddArg2(x, y)
29582 v.AddArg3(x, v0, v1)
29583 return true
29584 }
29585 }
29586 func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
29587 v_1 := v.Args[1]
29588 v_0 := v.Args[0]
29589 b := v.Block
29590 typ := &b.Func.Config.Types
// match: (SpectreSliceIndex <t> x y)
// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
29593 for {
29594 x := v_0
29595 y := v_1
29596 v.reset(OpAMD64CMOVQHI)
29597 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29598 v0.AuxInt = int64ToAuxInt(0)
29599 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29600 v1.AddArg2(x, y)
29601 v.AddArg3(x, v0, v1)
29602 return true
29603 }
29604 }
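// spectreClampSketch is an illustrative scalar model of the SpectreIndex
// lowering above, not part of the generated rules: the CMOV keeps the index
// only while it is in bounds, so even a mispredicted bounds check cannot
// speculatively form an out-of-range pointer. SpectreSliceIndex is the same
// idea with <= in place of <, since a slice expression may use idx == len.
func spectreClampSketch(idx, length uint64) uint64 {
	if idx >= length { // lowered branchlessly via the CMOVQCC above
		return 0
	}
	return idx
}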
29605 func rewriteValueAMD64_OpStore(v *Value) bool {
29606 v_2 := v.Args[2]
29607 v_1 := v.Args[1]
29608 v_0 := v.Args[0]
// match: (Store {t} ptr val mem)
// cond: t.Size() == 8 && t.IsFloat()
// result: (MOVSDstore ptr val mem)
29612 for {
29613 t := auxToType(v.Aux)
29614 ptr := v_0
29615 val := v_1
29616 mem := v_2
29617 if !(t.Size() == 8 && t.IsFloat()) {
29618 break
29619 }
29620 v.reset(OpAMD64MOVSDstore)
29621 v.AddArg3(ptr, val, mem)
29622 return true
29623 }
// match: (Store {t} ptr val mem)
// cond: t.Size() == 4 && t.IsFloat()
// result: (MOVSSstore ptr val mem)
29627 for {
29628 t := auxToType(v.Aux)
29629 ptr := v_0
29630 val := v_1
29631 mem := v_2
29632 if !(t.Size() == 4 && t.IsFloat()) {
29633 break
29634 }
29635 v.reset(OpAMD64MOVSSstore)
29636 v.AddArg3(ptr, val, mem)
29637 return true
29638 }
// match: (Store {t} ptr val mem)
// cond: t.Size() == 8 && !t.IsFloat()
// result: (MOVQstore ptr val mem)
29642 for {
29643 t := auxToType(v.Aux)
29644 ptr := v_0
29645 val := v_1
29646 mem := v_2
29647 if !(t.Size() == 8 && !t.IsFloat()) {
29648 break
29649 }
29650 v.reset(OpAMD64MOVQstore)
29651 v.AddArg3(ptr, val, mem)
29652 return true
29653 }
// match: (Store {t} ptr val mem)
// cond: t.Size() == 4 && !t.IsFloat()
// result: (MOVLstore ptr val mem)
29657 for {
29658 t := auxToType(v.Aux)
29659 ptr := v_0
29660 val := v_1
29661 mem := v_2
29662 if !(t.Size() == 4 && !t.IsFloat()) {
29663 break
29664 }
29665 v.reset(OpAMD64MOVLstore)
29666 v.AddArg3(ptr, val, mem)
29667 return true
29668 }
// match: (Store {t} ptr val mem)
// cond: t.Size() == 2
// result: (MOVWstore ptr val mem)
29672 for {
29673 t := auxToType(v.Aux)
29674 ptr := v_0
29675 val := v_1
29676 mem := v_2
29677 if !(t.Size() == 2) {
29678 break
29679 }
29680 v.reset(OpAMD64MOVWstore)
29681 v.AddArg3(ptr, val, mem)
29682 return true
29683 }
// match: (Store {t} ptr val mem)
// cond: t.Size() == 1
// result: (MOVBstore ptr val mem)
29687 for {
29688 t := auxToType(v.Aux)
29689 ptr := v_0
29690 val := v_1
29691 mem := v_2
29692 if !(t.Size() == 1) {
29693 break
29694 }
29695 v.reset(OpAMD64MOVBstore)
29696 v.AddArg3(ptr, val, mem)
29697 return true
29698 }
29699 return false
29700 }
29701 func rewriteValueAMD64_OpTrunc(v *Value) bool {
29702 v_0 := v.Args[0]
// match: (Trunc x)
// result: (ROUNDSD [3] x)
29705 for {
29706 x := v_0
29707 v.reset(OpAMD64ROUNDSD)
29708 v.AuxInt = int8ToAuxInt(3)
29709 v.AddArg(x)
29710 return true
29711 }
29712 }
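// truncSketch records why the Trunc rule above uses immediate 3: ROUNDSD's
// immediate selects the rounding mode (0 nearest, 1 toward -inf, 2 toward
// +inf, 3 toward zero), and rounding toward zero is exactly math.Trunc.
// Illustrative only, not part of the generated rules.
func truncSketch(x float64) float64 {
	return math.Trunc(x)
}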
29713 func rewriteValueAMD64_OpZero(v *Value) bool {
29714 v_1 := v.Args[1]
29715 v_0 := v.Args[0]
29716 b := v.Block
29717 config := b.Func.Config
29718 typ := &b.Func.Config.Types
// match: (Zero [0] _ mem)
// result: mem
29721 for {
29722 if auxIntToInt64(v.AuxInt) != 0 {
29723 break
29724 }
29725 mem := v_1
29726 v.copyOf(mem)
29727 return true
29728 }
// match: (Zero [1] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
29731 for {
29732 if auxIntToInt64(v.AuxInt) != 1 {
29733 break
29734 }
29735 destptr := v_0
29736 mem := v_1
29737 v.reset(OpAMD64MOVBstoreconst)
29738 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29739 v.AddArg2(destptr, mem)
29740 return true
29741 }
// match: (Zero [2] destptr mem)
// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
29744 for {
29745 if auxIntToInt64(v.AuxInt) != 2 {
29746 break
29747 }
29748 destptr := v_0
29749 mem := v_1
29750 v.reset(OpAMD64MOVWstoreconst)
29751 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29752 v.AddArg2(destptr, mem)
29753 return true
29754 }
// match: (Zero [4] destptr mem)
// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
29757 for {
29758 if auxIntToInt64(v.AuxInt) != 4 {
29759 break
29760 }
29761 destptr := v_0
29762 mem := v_1
29763 v.reset(OpAMD64MOVLstoreconst)
29764 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29765 v.AddArg2(destptr, mem)
29766 return true
29767 }
// match: (Zero [8] destptr mem)
// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
29770 for {
29771 if auxIntToInt64(v.AuxInt) != 8 {
29772 break
29773 }
29774 destptr := v_0
29775 mem := v_1
29776 v.reset(OpAMD64MOVQstoreconst)
29777 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29778 v.AddArg2(destptr, mem)
29779 return true
29780 }
// match: (Zero [3] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
29783 for {
29784 if auxIntToInt64(v.AuxInt) != 3 {
29785 break
29786 }
29787 destptr := v_0
29788 mem := v_1
29789 v.reset(OpAMD64MOVBstoreconst)
29790 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
29791 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
29792 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29793 v0.AddArg2(destptr, mem)
29794 v.AddArg2(destptr, v0)
29795 return true
29796 }
// match: (Zero [5] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
29799 for {
29800 if auxIntToInt64(v.AuxInt) != 5 {
29801 break
29802 }
29803 destptr := v_0
29804 mem := v_1
29805 v.reset(OpAMD64MOVBstoreconst)
29806 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29807 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29808 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29809 v0.AddArg2(destptr, mem)
29810 v.AddArg2(destptr, v0)
29811 return true
29812 }
// match: (Zero [6] destptr mem)
// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
29815 for {
29816 if auxIntToInt64(v.AuxInt) != 6 {
29817 break
29818 }
29819 destptr := v_0
29820 mem := v_1
29821 v.reset(OpAMD64MOVWstoreconst)
29822 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29823 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29824 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29825 v0.AddArg2(destptr, mem)
29826 v.AddArg2(destptr, v0)
29827 return true
29828 }
// match: (Zero [7] destptr mem)
// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
29831 for {
29832 if auxIntToInt64(v.AuxInt) != 7 {
29833 break
29834 }
29835 destptr := v_0
29836 mem := v_1
29837 v.reset(OpAMD64MOVLstoreconst)
29838 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
29839 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29840 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29841 v0.AddArg2(destptr, mem)
29842 v.AddArg2(destptr, v0)
29843 return true
29844 }
// match: (Zero [s] destptr mem)
// cond: s%8 != 0 && s > 8 && !config.useSSE
// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29848 for {
29849 s := auxIntToInt64(v.AuxInt)
29850 destptr := v_0
29851 mem := v_1
29852 if !(s%8 != 0 && s > 8 && !config.useSSE) {
29853 break
29854 }
29855 v.reset(OpZero)
29856 v.AuxInt = int64ToAuxInt(s - s%8)
29857 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
29858 v0.AuxInt = int64ToAuxInt(s % 8)
29859 v0.AddArg(destptr)
29860 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29861 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29862 v1.AddArg2(destptr, mem)
29863 v.AddArg2(v0, v1)
29864 return true
29865 }
// match: (Zero [16] destptr mem)
// cond: !config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29869 for {
29870 if auxIntToInt64(v.AuxInt) != 16 {
29871 break
29872 }
29873 destptr := v_0
29874 mem := v_1
29875 if !(!config.useSSE) {
29876 break
29877 }
29878 v.reset(OpAMD64MOVQstoreconst)
29879 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29880 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29881 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29882 v0.AddArg2(destptr, mem)
29883 v.AddArg2(destptr, v0)
29884 return true
29885 }
// match: (Zero [24] destptr mem)
// cond: !config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
29889 for {
29890 if auxIntToInt64(v.AuxInt) != 24 {
29891 break
29892 }
29893 destptr := v_0
29894 mem := v_1
29895 if !(!config.useSSE) {
29896 break
29897 }
29898 v.reset(OpAMD64MOVQstoreconst)
29899 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
29900 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29901 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29902 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29903 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29904 v1.AddArg2(destptr, mem)
29905 v0.AddArg2(destptr, v1)
29906 v.AddArg2(destptr, v0)
29907 return true
29908 }
// match: (Zero [32] destptr mem)
// cond: !config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
29912 for {
29913 if auxIntToInt64(v.AuxInt) != 32 {
29914 break
29915 }
29916 destptr := v_0
29917 mem := v_1
29918 if !(!config.useSSE) {
29919 break
29920 }
29921 v.reset(OpAMD64MOVQstoreconst)
29922 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
29923 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29924 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
29925 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29926 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29927 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29928 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29929 v2.AddArg2(destptr, mem)
29930 v1.AddArg2(destptr, v2)
29931 v0.AddArg2(destptr, v1)
29932 v.AddArg2(destptr, v0)
29933 return true
29934 }
// match: (Zero [9] destptr mem)
// cond: config.useSSE
// result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29938 for {
29939 if auxIntToInt64(v.AuxInt) != 9 {
29940 break
29941 }
29942 destptr := v_0
29943 mem := v_1
29944 if !(config.useSSE) {
29945 break
29946 }
29947 v.reset(OpAMD64MOVBstoreconst)
29948 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29949 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29950 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29951 v0.AddArg2(destptr, mem)
29952 v.AddArg2(destptr, v0)
29953 return true
29954 }
// match: (Zero [10] destptr mem)
// cond: config.useSSE
// result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29958 for {
29959 if auxIntToInt64(v.AuxInt) != 10 {
29960 break
29961 }
29962 destptr := v_0
29963 mem := v_1
29964 if !(config.useSSE) {
29965 break
29966 }
29967 v.reset(OpAMD64MOVWstoreconst)
29968 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29969 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29970 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29971 v0.AddArg2(destptr, mem)
29972 v.AddArg2(destptr, v0)
29973 return true
29974 }
// match: (Zero [11] destptr mem)
// cond: config.useSSE
// result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29978 for {
29979 if auxIntToInt64(v.AuxInt) != 11 {
29980 break
29981 }
29982 destptr := v_0
29983 mem := v_1
29984 if !(config.useSSE) {
29985 break
29986 }
29987 v.reset(OpAMD64MOVLstoreconst)
29988 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
29989 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29990 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29991 v0.AddArg2(destptr, mem)
29992 v.AddArg2(destptr, v0)
29993 return true
29994 }
// match: (Zero [12] destptr mem)
// cond: config.useSSE
// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29998 for {
29999 if auxIntToInt64(v.AuxInt) != 12 {
30000 break
30001 }
30002 destptr := v_0
30003 mem := v_1
30004 if !(config.useSSE) {
30005 break
30006 }
30007 v.reset(OpAMD64MOVLstoreconst)
30008 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30009 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30010 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30011 v0.AddArg2(destptr, mem)
30012 v.AddArg2(destptr, v0)
30013 return true
30014 }
// match: (Zero [s] destptr mem)
// cond: s > 12 && s < 16 && config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
30018 for {
30019 s := auxIntToInt64(v.AuxInt)
30020 destptr := v_0
30021 mem := v_1
30022 if !(s > 12 && s < 16 && config.useSSE) {
30023 break
30024 }
30025 v.reset(OpAMD64MOVQstoreconst)
30026 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
30027 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30028 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30029 v0.AddArg2(destptr, mem)
30030 v.AddArg2(destptr, v0)
30031 return true
30032 }
// match: (Zero [s] destptr mem)
// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
30036 for {
30037 s := auxIntToInt64(v.AuxInt)
30038 destptr := v_0
30039 mem := v_1
30040 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
30041 break
30042 }
30043 v.reset(OpZero)
30044 v.AuxInt = int64ToAuxInt(s - s%16)
30045 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30046 v0.AuxInt = int64ToAuxInt(s % 16)
30047 v0.AddArg(destptr)
30048 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30049 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30050 v1.AddArg2(destptr, mem)
30051 v.AddArg2(v0, v1)
30052 return true
30053 }
// match: (Zero [s] destptr mem)
// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
30057 for {
30058 s := auxIntToInt64(v.AuxInt)
30059 destptr := v_0
30060 mem := v_1
30061 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
30062 break
30063 }
30064 v.reset(OpZero)
30065 v.AuxInt = int64ToAuxInt(s - s%16)
30066 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30067 v0.AuxInt = int64ToAuxInt(s % 16)
30068 v0.AddArg(destptr)
30069 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30070 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30071 v1.AddArg2(destptr, mem)
30072 v.AddArg2(v0, v1)
30073 return true
30074 }
// match: (Zero [16] destptr mem)
// cond: config.useSSE
// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
30078 for {
30079 if auxIntToInt64(v.AuxInt) != 16 {
30080 break
30081 }
30082 destptr := v_0
30083 mem := v_1
30084 if !(config.useSSE) {
30085 break
30086 }
30087 v.reset(OpAMD64MOVOstoreconst)
30088 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30089 v.AddArg2(destptr, mem)
30090 return true
30091 }
// match: (Zero [32] destptr mem)
// cond: config.useSSE
// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
30095 for {
30096 if auxIntToInt64(v.AuxInt) != 32 {
30097 break
30098 }
30099 destptr := v_0
30100 mem := v_1
30101 if !(config.useSSE) {
30102 break
30103 }
30104 v.reset(OpAMD64MOVOstoreconst)
30105 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30106 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30107 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30108 v0.AddArg2(destptr, mem)
30109 v.AddArg2(destptr, v0)
30110 return true
30111 }
// match: (Zero [48] destptr mem)
// cond: config.useSSE
// result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
30115 for {
30116 if auxIntToInt64(v.AuxInt) != 48 {
30117 break
30118 }
30119 destptr := v_0
30120 mem := v_1
30121 if !(config.useSSE) {
30122 break
30123 }
30124 v.reset(OpAMD64MOVOstoreconst)
30125 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30126 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30127 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30128 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30129 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30130 v1.AddArg2(destptr, mem)
30131 v0.AddArg2(destptr, v1)
30132 v.AddArg2(destptr, v0)
30133 return true
30134 }
// match: (Zero [64] destptr mem)
// cond: config.useSSE
// result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
30138 for {
30139 if auxIntToInt64(v.AuxInt) != 64 {
30140 break
30141 }
30142 destptr := v_0
30143 mem := v_1
30144 if !(config.useSSE) {
30145 break
30146 }
30147 v.reset(OpAMD64MOVOstoreconst)
30148 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
30149 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30150 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30151 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30152 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30153 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30154 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30155 v2.AddArg2(destptr, mem)
30156 v1.AddArg2(destptr, v2)
30157 v0.AddArg2(destptr, v1)
30158 v.AddArg2(destptr, v0)
30159 return true
30160 }
// match: (Zero [s] destptr mem)
// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
// result: (DUFFZERO [s] destptr mem)
30164 for {
30165 s := auxIntToInt64(v.AuxInt)
30166 destptr := v_0
30167 mem := v_1
30168 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
30169 break
30170 }
30171 v.reset(OpAMD64DUFFZERO)
30172 v.AuxInt = int64ToAuxInt(s)
30173 v.AddArg2(destptr, mem)
30174 return true
30175 }
// match: (Zero [s] destptr mem)
// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
30179 for {
30180 s := auxIntToInt64(v.AuxInt)
30181 destptr := v_0
30182 mem := v_1
30183 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
30184 break
30185 }
30186 v.reset(OpAMD64REPSTOSQ)
30187 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30188 v0.AuxInt = int64ToAuxInt(s / 8)
30189 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30190 v1.AuxInt = int64ToAuxInt(0)
30191 v.AddArg4(destptr, v0, v1, mem)
30192 return true
30193 }
30194 return false
30195 }
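// zeroStrategySketch summarizes, in plain Go, how the Zero rules above pick a
// lowering for a given size. Illustrative only; the authoritative conditions
// are the rules themselves: small sizes become one or two (possibly
// overlapping) widened constant stores, mid-range sizes become chains of 8-
// or 16-byte constant stores (with a recursive Zero peeling off unaligned
// leading bytes), sizes in (64, 1024] that are multiples of 16 use DUFFZERO,
// and the rest fall back to REPSTOSQ.
func zeroStrategySketch(s int64, useSSE, noDuffDevice bool) string {
	switch {
	case s <= 16:
		return "widened constant stores"
	case s > 64 && s <= 1024 && s%16 == 0 && !noDuffDevice:
		return "DUFFZERO"
	case (s > 1024 || (noDuffDevice && s > 64 || !useSSE && s > 32)) && s%8 == 0:
		return "REPSTOSQ"
	default:
		return "chained constant stores / recursive Zero"
	}
}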
30196 func rewriteBlockAMD64(b *Block) bool {
30197 typ := &b.Func.Config.Types
30198 switch b.Kind {
30199 case BlockAMD64EQ:
// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y) yes no)
// result: (UGE (BTL x y) yes no)
30202 for b.Controls[0].Op == OpAMD64TESTL {
30203 v_0 := b.Controls[0]
30204 _ = v_0.Args[1]
30205 v_0_0 := v_0.Args[0]
30206 v_0_1 := v_0.Args[1]
30207 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30208 if v_0_0.Op != OpAMD64SHLL {
30209 continue
30210 }
30211 x := v_0_0.Args[1]
30212 v_0_0_0 := v_0_0.Args[0]
30213 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
30214 continue
30215 }
30216 y := v_0_1
30217 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
30218 v0.AddArg2(x, y)
30219 b.resetWithControl(BlockAMD64UGE, v0)
30220 return true
30221 }
30222 break
30223 }
// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y) yes no)
// result: (UGE (BTQ x y) yes no)
30226 for b.Controls[0].Op == OpAMD64TESTQ {
30227 v_0 := b.Controls[0]
30228 _ = v_0.Args[1]
30229 v_0_0 := v_0.Args[0]
30230 v_0_1 := v_0.Args[1]
30231 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30232 if v_0_0.Op != OpAMD64SHLQ {
30233 continue
30234 }
30235 x := v_0_0.Args[1]
30236 v_0_0_0 := v_0_0.Args[0]
30237 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
30238 continue
30239 }
30240 y := v_0_1
30241 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
30242 v0.AddArg2(x, y)
30243 b.resetWithControl(BlockAMD64UGE, v0)
30244 return true
30245 }
30246 break
30247 }
// match: (EQ (TESTLconst [c] x) yes no)
// cond: isUint32PowerOfTwo(int64(c))
// result: (UGE (BTLconst [int8(log32(c))] x) yes no)
30251 for b.Controls[0].Op == OpAMD64TESTLconst {
30252 v_0 := b.Controls[0]
30253 c := auxIntToInt32(v_0.AuxInt)
30254 x := v_0.Args[0]
30255 if !(isUint32PowerOfTwo(int64(c))) {
30256 break
30257 }
30258 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30259 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30260 v0.AddArg(x)
30261 b.resetWithControl(BlockAMD64UGE, v0)
30262 return true
30263 }
// match: (EQ (TESTQconst [c] x) yes no)
// cond: isUint64PowerOfTwo(int64(c))
// result: (UGE (BTQconst [int8(log32(c))] x) yes no)
30267 for b.Controls[0].Op == OpAMD64TESTQconst {
30268 v_0 := b.Controls[0]
30269 c := auxIntToInt32(v_0.AuxInt)
30270 x := v_0.Args[0]
30271 if !(isUint64PowerOfTwo(int64(c))) {
30272 break
30273 }
30274 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30275 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30276 v0.AddArg(x)
30277 b.resetWithControl(BlockAMD64UGE, v0)
30278 return true
30279 }
// match: (EQ (TESTQ (MOVQconst [c]) x) yes no)
// cond: isUint64PowerOfTwo(c)
// result: (UGE (BTQconst [int8(log64(c))] x) yes no)
30283 for b.Controls[0].Op == OpAMD64TESTQ {
30284 v_0 := b.Controls[0]
30285 _ = v_0.Args[1]
30286 v_0_0 := v_0.Args[0]
30287 v_0_1 := v_0.Args[1]
30288 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30289 if v_0_0.Op != OpAMD64MOVQconst {
30290 continue
30291 }
30292 c := auxIntToInt64(v_0_0.AuxInt)
30293 x := v_0_1
30294 if !(isUint64PowerOfTwo(c)) {
30295 continue
30296 }
30297 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30298 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
30299 v0.AddArg(x)
30300 b.resetWithControl(BlockAMD64UGE, v0)
30301 return true
30302 }
30303 break
30304 }
// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [63] x) yes no)
30308 for b.Controls[0].Op == OpAMD64TESTQ {
30309 v_0 := b.Controls[0]
30310 _ = v_0.Args[1]
30311 v_0_0 := v_0.Args[0]
30312 v_0_1 := v_0.Args[1]
30313 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30314 z1 := v_0_0
30315 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
30316 continue
30317 }
30318 z1_0 := z1.Args[0]
30319 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30320 continue
30321 }
30322 x := z1_0.Args[0]
30323 z2 := v_0_1
30324 if !(z1 == z2) {
30325 continue
30326 }
30327 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30328 v0.AuxInt = int8ToAuxInt(63)
30329 v0.AddArg(x)
30330 b.resetWithControl(BlockAMD64UGE, v0)
30331 return true
30332 }
30333 break
30334 }
// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [31] x) yes no)
30338 for b.Controls[0].Op == OpAMD64TESTL {
30339 v_0 := b.Controls[0]
30340 _ = v_0.Args[1]
30341 v_0_0 := v_0.Args[0]
30342 v_0_1 := v_0.Args[1]
30343 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30344 z1 := v_0_0
30345 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
30346 continue
30347 }
30348 z1_0 := z1.Args[0]
30349 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30350 continue
30351 }
30352 x := z1_0.Args[0]
30353 z2 := v_0_1
30354 if !(z1 == z2) {
30355 continue
30356 }
30357 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30358 v0.AuxInt = int8ToAuxInt(31)
30359 v0.AddArg(x)
30360 b.resetWithControl(BlockAMD64UGE, v0)
30361 return true
30362 }
30363 break
30364 }
// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [0] x) yes no)
30368 for b.Controls[0].Op == OpAMD64TESTQ {
30369 v_0 := b.Controls[0]
30370 _ = v_0.Args[1]
30371 v_0_0 := v_0.Args[0]
30372 v_0_1 := v_0.Args[1]
30373 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30374 z1 := v_0_0
30375 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30376 continue
30377 }
30378 z1_0 := z1.Args[0]
30379 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30380 continue
30381 }
30382 x := z1_0.Args[0]
30383 z2 := v_0_1
30384 if !(z1 == z2) {
30385 continue
30386 }
30387 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30388 v0.AuxInt = int8ToAuxInt(0)
30389 v0.AddArg(x)
30390 b.resetWithControl(BlockAMD64UGE, v0)
30391 return true
30392 }
30393 break
30394 }
// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTLconst [0] x) yes no)
30398 for b.Controls[0].Op == OpAMD64TESTL {
30399 v_0 := b.Controls[0]
30400 _ = v_0.Args[1]
30401 v_0_0 := v_0.Args[0]
30402 v_0_1 := v_0.Args[1]
30403 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30404 z1 := v_0_0
30405 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30406 continue
30407 }
30408 z1_0 := z1.Args[0]
30409 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30410 continue
30411 }
30412 x := z1_0.Args[0]
30413 z2 := v_0_1
30414 if !(z1 == z2) {
30415 continue
30416 }
30417 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30418 v0.AuxInt = int8ToAuxInt(0)
30419 v0.AddArg(x)
30420 b.resetWithControl(BlockAMD64UGE, v0)
30421 return true
30422 }
30423 break
30424 }
// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [63] x) yes no)
30428 for b.Controls[0].Op == OpAMD64TESTQ {
30429 v_0 := b.Controls[0]
30430 _ = v_0.Args[1]
30431 v_0_0 := v_0.Args[0]
30432 v_0_1 := v_0.Args[1]
30433 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30434 z1 := v_0_0
30435 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30436 continue
30437 }
30438 x := z1.Args[0]
30439 z2 := v_0_1
30440 if !(z1 == z2) {
30441 continue
30442 }
30443 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30444 v0.AuxInt = int8ToAuxInt(63)
30445 v0.AddArg(x)
30446 b.resetWithControl(BlockAMD64UGE, v0)
30447 return true
30448 }
30449 break
30450 }
// match: (EQ (TESTL z1:(SHRLconst [31] x) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTLconst [31] x) yes no)
30454 for b.Controls[0].Op == OpAMD64TESTL {
30455 v_0 := b.Controls[0]
30456 _ = v_0.Args[1]
30457 v_0_0 := v_0.Args[0]
30458 v_0_1 := v_0.Args[1]
30459 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30460 z1 := v_0_0
30461 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30462 continue
30463 }
30464 x := z1.Args[0]
30465 z2 := v_0_1
30466 if !(z1 == z2) {
30467 continue
30468 }
30469 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30470 v0.AuxInt = int8ToAuxInt(31)
30471 v0.AddArg(x)
30472 b.resetWithControl(BlockAMD64UGE, v0)
30473 return true
30474 }
30475 break
30476 }
// match: (EQ (InvertFlags cmp) yes no)
// result: (EQ cmp yes no)
30479 for b.Controls[0].Op == OpAMD64InvertFlags {
30480 v_0 := b.Controls[0]
30481 cmp := v_0.Args[0]
30482 b.resetWithControl(BlockAMD64EQ, cmp)
30483 return true
30484 }
// match: (EQ (FlagEQ) yes no)
// result: (First yes no)
30487 for b.Controls[0].Op == OpAMD64FlagEQ {
30488 b.Reset(BlockFirst)
30489 return true
30490 }
// match: (EQ (FlagLT_ULT) yes no)
// result: (First no yes)
30493 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30494 b.Reset(BlockFirst)
30495 b.swapSuccessors()
30496 return true
30497 }
// match: (EQ (FlagLT_UGT) yes no)
// result: (First no yes)
30500 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30501 b.Reset(BlockFirst)
30502 b.swapSuccessors()
30503 return true
30504 }
// match: (EQ (FlagGT_ULT) yes no)
// result: (First no yes)
30507 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30508 b.Reset(BlockFirst)
30509 b.swapSuccessors()
30510 return true
30511 }
// match: (EQ (FlagGT_UGT) yes no)
// result: (First no yes)
30514 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30515 b.Reset(BlockFirst)
30516 b.swapSuccessors()
30517 return true
30518 }
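// The Flag* rules above fold branches whose condition is statically known:
// FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, and FlagGT_UGT each record a
// fixed comparison outcome, so the block degenerates to BlockFirst, with
// swapSuccessors() used when the known outcome selects the "no" edge. The
// same pattern repeats in every flag-consuming case below.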
// match: (EQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
30521 for b.Controls[0].Op == OpAMD64TESTQ {
30522 v_0 := b.Controls[0]
30523 _ = v_0.Args[1]
30524 v_0_0 := v_0.Args[0]
30525 v_0_1 := v_0.Args[1]
30526 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30527 s := v_0_0
30528 if s.Op != OpSelect0 {
30529 continue
30530 }
30531 blsr := s.Args[0]
30532 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
30533 continue
30534 }
30535 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30536 v0.AddArg(blsr)
30537 b.resetWithControl(BlockAMD64EQ, v0)
30538 return true
30539 }
30540 break
30541 }
// match: (EQ (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
30544 for b.Controls[0].Op == OpAMD64TESTL {
30545 v_0 := b.Controls[0]
30546 _ = v_0.Args[1]
30547 v_0_0 := v_0.Args[0]
30548 v_0_1 := v_0.Args[1]
30549 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30550 s := v_0_0
30551 if s.Op != OpSelect0 {
30552 continue
30553 }
30554 blsr := s.Args[0]
30555 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
30556 continue
30557 }
30558 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30559 v0.AddArg(blsr)
30560 b.resetWithControl(BlockAMD64EQ, v0)
30561 return true
30562 }
30563 break
30564 }
30565 case BlockAMD64GE:
// match: (GE (InvertFlags cmp) yes no)
// result: (LE cmp yes no)
30568 for b.Controls[0].Op == OpAMD64InvertFlags {
30569 v_0 := b.Controls[0]
30570 cmp := v_0.Args[0]
30571 b.resetWithControl(BlockAMD64LE, cmp)
30572 return true
30573 }
// match: (GE (FlagEQ) yes no)
// result: (First yes no)
30576 for b.Controls[0].Op == OpAMD64FlagEQ {
30577 b.Reset(BlockFirst)
30578 return true
30579 }
// match: (GE (FlagLT_ULT) yes no)
// result: (First no yes)
30582 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30583 b.Reset(BlockFirst)
30584 b.swapSuccessors()
30585 return true
30586 }
// match: (GE (FlagLT_UGT) yes no)
// result: (First no yes)
30589 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30590 b.Reset(BlockFirst)
30591 b.swapSuccessors()
30592 return true
30593 }
// match: (GE (FlagGT_ULT) yes no)
// result: (First yes no)
30596 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30597 b.Reset(BlockFirst)
30598 return true
30599 }
// match: (GE (FlagGT_UGT) yes no)
// result: (First yes no)
30602 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30603 b.Reset(BlockFirst)
30604 return true
30605 }
30606 case BlockAMD64GT:
// match: (GT (InvertFlags cmp) yes no)
// result: (LT cmp yes no)
30609 for b.Controls[0].Op == OpAMD64InvertFlags {
30610 v_0 := b.Controls[0]
30611 cmp := v_0.Args[0]
30612 b.resetWithControl(BlockAMD64LT, cmp)
30613 return true
30614 }
// match: (GT (FlagEQ) yes no)
// result: (First no yes)
30617 for b.Controls[0].Op == OpAMD64FlagEQ {
30618 b.Reset(BlockFirst)
30619 b.swapSuccessors()
30620 return true
30621 }
// match: (GT (FlagLT_ULT) yes no)
// result: (First no yes)
30624 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30625 b.Reset(BlockFirst)
30626 b.swapSuccessors()
30627 return true
30628 }
// match: (GT (FlagLT_UGT) yes no)
// result: (First no yes)
30631 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30632 b.Reset(BlockFirst)
30633 b.swapSuccessors()
30634 return true
30635 }
// match: (GT (FlagGT_ULT) yes no)
// result: (First yes no)
30638 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30639 b.Reset(BlockFirst)
30640 return true
30641 }
// match: (GT (FlagGT_UGT) yes no)
// result: (First yes no)
30644 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30645 b.Reset(BlockFirst)
30646 return true
30647 }
30648 case BlockIf:
// match: (If (SETL cmp) yes no)
// result: (LT cmp yes no)
30651 for b.Controls[0].Op == OpAMD64SETL {
30652 v_0 := b.Controls[0]
30653 cmp := v_0.Args[0]
30654 b.resetWithControl(BlockAMD64LT, cmp)
30655 return true
30656 }
// match: (If (SETLE cmp) yes no)
// result: (LE cmp yes no)
30659 for b.Controls[0].Op == OpAMD64SETLE {
30660 v_0 := b.Controls[0]
30661 cmp := v_0.Args[0]
30662 b.resetWithControl(BlockAMD64LE, cmp)
30663 return true
30664 }
// match: (If (SETG cmp) yes no)
// result: (GT cmp yes no)
30667 for b.Controls[0].Op == OpAMD64SETG {
30668 v_0 := b.Controls[0]
30669 cmp := v_0.Args[0]
30670 b.resetWithControl(BlockAMD64GT, cmp)
30671 return true
30672 }
// match: (If (SETGE cmp) yes no)
// result: (GE cmp yes no)
30675 for b.Controls[0].Op == OpAMD64SETGE {
30676 v_0 := b.Controls[0]
30677 cmp := v_0.Args[0]
30678 b.resetWithControl(BlockAMD64GE, cmp)
30679 return true
30680 }
// match: (If (SETEQ cmp) yes no)
// result: (EQ cmp yes no)
30683 for b.Controls[0].Op == OpAMD64SETEQ {
30684 v_0 := b.Controls[0]
30685 cmp := v_0.Args[0]
30686 b.resetWithControl(BlockAMD64EQ, cmp)
30687 return true
30688 }
// match: (If (SETNE cmp) yes no)
// result: (NE cmp yes no)
30691 for b.Controls[0].Op == OpAMD64SETNE {
30692 v_0 := b.Controls[0]
30693 cmp := v_0.Args[0]
30694 b.resetWithControl(BlockAMD64NE, cmp)
30695 return true
30696 }
// match: (If (SETB cmp) yes no)
// result: (ULT cmp yes no)
30699 for b.Controls[0].Op == OpAMD64SETB {
30700 v_0 := b.Controls[0]
30701 cmp := v_0.Args[0]
30702 b.resetWithControl(BlockAMD64ULT, cmp)
30703 return true
30704 }
// match: (If (SETBE cmp) yes no)
// result: (ULE cmp yes no)
30707 for b.Controls[0].Op == OpAMD64SETBE {
30708 v_0 := b.Controls[0]
30709 cmp := v_0.Args[0]
30710 b.resetWithControl(BlockAMD64ULE, cmp)
30711 return true
30712 }
// match: (If (SETA cmp) yes no)
// result: (UGT cmp yes no)
30715 for b.Controls[0].Op == OpAMD64SETA {
30716 v_0 := b.Controls[0]
30717 cmp := v_0.Args[0]
30718 b.resetWithControl(BlockAMD64UGT, cmp)
30719 return true
30720 }
// match: (If (SETAE cmp) yes no)
// result: (UGE cmp yes no)
30723 for b.Controls[0].Op == OpAMD64SETAE {
30724 v_0 := b.Controls[0]
30725 cmp := v_0.Args[0]
30726 b.resetWithControl(BlockAMD64UGE, cmp)
30727 return true
30728 }
// match: (If (SETO cmp) yes no)
// result: (OS cmp yes no)
30731 for b.Controls[0].Op == OpAMD64SETO {
30732 v_0 := b.Controls[0]
30733 cmp := v_0.Args[0]
30734 b.resetWithControl(BlockAMD64OS, cmp)
30735 return true
30736 }
// match: (If (SETGF cmp) yes no)
// result: (UGT cmp yes no)
30739 for b.Controls[0].Op == OpAMD64SETGF {
30740 v_0 := b.Controls[0]
30741 cmp := v_0.Args[0]
30742 b.resetWithControl(BlockAMD64UGT, cmp)
30743 return true
30744 }
// match: (If (SETGEF cmp) yes no)
// result: (UGE cmp yes no)
30747 for b.Controls[0].Op == OpAMD64SETGEF {
30748 v_0 := b.Controls[0]
30749 cmp := v_0.Args[0]
30750 b.resetWithControl(BlockAMD64UGE, cmp)
30751 return true
30752 }
// match: (If (SETEQF cmp) yes no)
// result: (EQF cmp yes no)
30755 for b.Controls[0].Op == OpAMD64SETEQF {
30756 v_0 := b.Controls[0]
30757 cmp := v_0.Args[0]
30758 b.resetWithControl(BlockAMD64EQF, cmp)
30759 return true
30760 }
// match: (If (SETNEF cmp) yes no)
// result: (NEF cmp yes no)
30763 for b.Controls[0].Op == OpAMD64SETNEF {
30764 v_0 := b.Controls[0]
30765 cmp := v_0.Args[0]
30766 b.resetWithControl(BlockAMD64NEF, cmp)
30767 return true
30768 }
// match: (If cond yes no)
// result: (NE (TESTB cond cond) yes no)
30771 for {
30772 cond := b.Controls[0]
30773 v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
30774 v0.AddArg2(cond, cond)
30775 b.resetWithControl(BlockAMD64NE, v0)
30776 return true
30777 }
30778 case BlockJumpTable:
// match: (JumpTable idx)
// result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
30781 for {
30782 idx := b.Controls[0]
30783 v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
30784 v0.Aux = symToAux(makeJumpTableSym(b))
30785 v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
30786 v0.AddArg(v1)
30787 b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
30788 b.Aux = symToAux(makeJumpTableSym(b))
30789 return true
30790 }
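// The JumpTable lowering above materializes the table's address with a LEAQ
// of the per-block jump-table symbol relative to SB, and records the same
// symbol in the block's Aux so that later phases can emit the table itself.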
30791 case BlockAMD64LE:
// match: (LE (InvertFlags cmp) yes no)
// result: (GE cmp yes no)
30794 for b.Controls[0].Op == OpAMD64InvertFlags {
30795 v_0 := b.Controls[0]
30796 cmp := v_0.Args[0]
30797 b.resetWithControl(BlockAMD64GE, cmp)
30798 return true
30799 }
// match: (LE (FlagEQ) yes no)
// result: (First yes no)
30802 for b.Controls[0].Op == OpAMD64FlagEQ {
30803 b.Reset(BlockFirst)
30804 return true
30805 }
// match: (LE (FlagLT_ULT) yes no)
// result: (First yes no)
30808 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30809 b.Reset(BlockFirst)
30810 return true
30811 }
// match: (LE (FlagLT_UGT) yes no)
// result: (First yes no)
30814 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30815 b.Reset(BlockFirst)
30816 return true
30817 }
// match: (LE (FlagGT_ULT) yes no)
// result: (First no yes)
30820 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30821 b.Reset(BlockFirst)
30822 b.swapSuccessors()
30823 return true
30824 }
// match: (LE (FlagGT_UGT) yes no)
// result: (First no yes)
30827 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30828 b.Reset(BlockFirst)
30829 b.swapSuccessors()
30830 return true
30831 }
30832 case BlockAMD64LT:
// match: (LT (InvertFlags cmp) yes no)
// result: (GT cmp yes no)
30835 for b.Controls[0].Op == OpAMD64InvertFlags {
30836 v_0 := b.Controls[0]
30837 cmp := v_0.Args[0]
30838 b.resetWithControl(BlockAMD64GT, cmp)
30839 return true
30840 }
// match: (LT (FlagEQ) yes no)
// result: (First no yes)
30843 for b.Controls[0].Op == OpAMD64FlagEQ {
30844 b.Reset(BlockFirst)
30845 b.swapSuccessors()
30846 return true
30847 }
// match: (LT (FlagLT_ULT) yes no)
// result: (First yes no)
30850 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30851 b.Reset(BlockFirst)
30852 return true
30853 }
// match: (LT (FlagLT_UGT) yes no)
// result: (First yes no)
30856 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30857 b.Reset(BlockFirst)
30858 return true
30859 }
// match: (LT (FlagGT_ULT) yes no)
// result: (First no yes)
30862 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30863 b.Reset(BlockFirst)
30864 b.swapSuccessors()
30865 return true
30866 }
// match: (LT (FlagGT_UGT) yes no)
// result: (First no yes)
30869 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30870 b.Reset(BlockFirst)
30871 b.swapSuccessors()
30872 return true
30873 }
30874 case BlockAMD64NE:
// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
// result: (LT cmp yes no)
30877 for b.Controls[0].Op == OpAMD64TESTB {
30878 v_0 := b.Controls[0]
30879 _ = v_0.Args[1]
30880 v_0_0 := v_0.Args[0]
30881 if v_0_0.Op != OpAMD64SETL {
30882 break
30883 }
30884 cmp := v_0_0.Args[0]
30885 v_0_1 := v_0.Args[1]
30886 if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
30887 break
30888 }
30889 b.resetWithControl(BlockAMD64LT, cmp)
30890 return true
30891 }
// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
// result: (LE cmp yes no)
30894 for b.Controls[0].Op == OpAMD64TESTB {
30895 v_0 := b.Controls[0]
30896 _ = v_0.Args[1]
30897 v_0_0 := v_0.Args[0]
30898 if v_0_0.Op != OpAMD64SETLE {
30899 break
30900 }
30901 cmp := v_0_0.Args[0]
30902 v_0_1 := v_0.Args[1]
30903 if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
30904 break
30905 }
30906 b.resetWithControl(BlockAMD64LE, cmp)
30907 return true
30908 }
// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
// result: (GT cmp yes no)
30911 for b.Controls[0].Op == OpAMD64TESTB {
30912 v_0 := b.Controls[0]
30913 _ = v_0.Args[1]
30914 v_0_0 := v_0.Args[0]
30915 if v_0_0.Op != OpAMD64SETG {
30916 break
30917 }
30918 cmp := v_0_0.Args[0]
30919 v_0_1 := v_0.Args[1]
30920 if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
30921 break
30922 }
30923 b.resetWithControl(BlockAMD64GT, cmp)
30924 return true
30925 }
// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
// result: (GE cmp yes no)
30928 for b.Controls[0].Op == OpAMD64TESTB {
30929 v_0 := b.Controls[0]
30930 _ = v_0.Args[1]
30931 v_0_0 := v_0.Args[0]
30932 if v_0_0.Op != OpAMD64SETGE {
30933 break
30934 }
30935 cmp := v_0_0.Args[0]
30936 v_0_1 := v_0.Args[1]
30937 if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
30938 break
30939 }
30940 b.resetWithControl(BlockAMD64GE, cmp)
30941 return true
30942 }
// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
// result: (EQ cmp yes no)
30945 for b.Controls[0].Op == OpAMD64TESTB {
30946 v_0 := b.Controls[0]
30947 _ = v_0.Args[1]
30948 v_0_0 := v_0.Args[0]
30949 if v_0_0.Op != OpAMD64SETEQ {
30950 break
30951 }
30952 cmp := v_0_0.Args[0]
30953 v_0_1 := v_0.Args[1]
30954 if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
30955 break
30956 }
30957 b.resetWithControl(BlockAMD64EQ, cmp)
30958 return true
30959 }
// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
// result: (NE cmp yes no)
30962 for b.Controls[0].Op == OpAMD64TESTB {
30963 v_0 := b.Controls[0]
30964 _ = v_0.Args[1]
30965 v_0_0 := v_0.Args[0]
30966 if v_0_0.Op != OpAMD64SETNE {
30967 break
30968 }
30969 cmp := v_0_0.Args[0]
30970 v_0_1 := v_0.Args[1]
30971 if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
30972 break
30973 }
30974 b.resetWithControl(BlockAMD64NE, cmp)
30975 return true
30976 }
// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
// result: (ULT cmp yes no)
30979 for b.Controls[0].Op == OpAMD64TESTB {
30980 v_0 := b.Controls[0]
30981 _ = v_0.Args[1]
30982 v_0_0 := v_0.Args[0]
30983 if v_0_0.Op != OpAMD64SETB {
30984 break
30985 }
30986 cmp := v_0_0.Args[0]
30987 v_0_1 := v_0.Args[1]
30988 if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
30989 break
30990 }
30991 b.resetWithControl(BlockAMD64ULT, cmp)
30992 return true
30993 }
// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
// result: (ULE cmp yes no)
30996 for b.Controls[0].Op == OpAMD64TESTB {
30997 v_0 := b.Controls[0]
30998 _ = v_0.Args[1]
30999 v_0_0 := v_0.Args[0]
31000 if v_0_0.Op != OpAMD64SETBE {
31001 break
31002 }
31003 cmp := v_0_0.Args[0]
31004 v_0_1 := v_0.Args[1]
31005 if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
31006 break
31007 }
31008 b.resetWithControl(BlockAMD64ULE, cmp)
31009 return true
31010 }
// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
// result: (UGT cmp yes no)
31013 for b.Controls[0].Op == OpAMD64TESTB {
31014 v_0 := b.Controls[0]
31015 _ = v_0.Args[1]
31016 v_0_0 := v_0.Args[0]
31017 if v_0_0.Op != OpAMD64SETA {
31018 break
31019 }
31020 cmp := v_0_0.Args[0]
31021 v_0_1 := v_0.Args[1]
31022 if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
31023 break
31024 }
31025 b.resetWithControl(BlockAMD64UGT, cmp)
31026 return true
31027 }
// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
// result: (UGE cmp yes no)
31030 for b.Controls[0].Op == OpAMD64TESTB {
31031 v_0 := b.Controls[0]
31032 _ = v_0.Args[1]
31033 v_0_0 := v_0.Args[0]
31034 if v_0_0.Op != OpAMD64SETAE {
31035 break
31036 }
31037 cmp := v_0_0.Args[0]
31038 v_0_1 := v_0.Args[1]
31039 if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
31040 break
31041 }
31042 b.resetWithControl(BlockAMD64UGE, cmp)
31043 return true
31044 }
// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
// result: (OS cmp yes no)
31047 for b.Controls[0].Op == OpAMD64TESTB {
31048 v_0 := b.Controls[0]
31049 _ = v_0.Args[1]
31050 v_0_0 := v_0.Args[0]
31051 if v_0_0.Op != OpAMD64SETO {
31052 break
31053 }
31054 cmp := v_0_0.Args[0]
31055 v_0_1 := v_0.Args[1]
31056 if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
31057 break
31058 }
31059 b.resetWithControl(BlockAMD64OS, cmp)
31060 return true
31061 }
// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y) yes no)
// result: (ULT (BTL x y) yes no)
31064 for b.Controls[0].Op == OpAMD64TESTL {
31065 v_0 := b.Controls[0]
31066 _ = v_0.Args[1]
31067 v_0_0 := v_0.Args[0]
31068 v_0_1 := v_0.Args[1]
31069 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31070 if v_0_0.Op != OpAMD64SHLL {
31071 continue
31072 }
31073 x := v_0_0.Args[1]
31074 v_0_0_0 := v_0_0.Args[0]
31075 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
31076 continue
31077 }
31078 y := v_0_1
31079 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
31080 v0.AddArg2(x, y)
31081 b.resetWithControl(BlockAMD64ULT, v0)
31082 return true
31083 }
31084 break
31085 }
// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y) yes no)
// result: (ULT (BTQ x y) yes no)
31088 for b.Controls[0].Op == OpAMD64TESTQ {
31089 v_0 := b.Controls[0]
31090 _ = v_0.Args[1]
31091 v_0_0 := v_0.Args[0]
31092 v_0_1 := v_0.Args[1]
31093 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31094 if v_0_0.Op != OpAMD64SHLQ {
31095 continue
31096 }
31097 x := v_0_0.Args[1]
31098 v_0_0_0 := v_0_0.Args[0]
31099 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
31100 continue
31101 }
31102 y := v_0_1
31103 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
31104 v0.AddArg2(x, y)
31105 b.resetWithControl(BlockAMD64ULT, v0)
31106 return true
31107 }
31108 break
31109 }
// match: (NE (TESTLconst [c] x) yes no)
// cond: isUint32PowerOfTwo(int64(c))
// result: (ULT (BTLconst [int8(log32(c))] x) yes no)
31113 for b.Controls[0].Op == OpAMD64TESTLconst {
31114 v_0 := b.Controls[0]
31115 c := auxIntToInt32(v_0.AuxInt)
31116 x := v_0.Args[0]
31117 if !(isUint32PowerOfTwo(int64(c))) {
31118 break
31119 }
31120 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31121 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31122 v0.AddArg(x)
31123 b.resetWithControl(BlockAMD64ULT, v0)
31124 return true
31125 }
// match: (NE (TESTQconst [c] x) yes no)
// cond: isUint64PowerOfTwo(int64(c))
// result: (ULT (BTQconst [int8(log32(c))] x) yes no)
31129 for b.Controls[0].Op == OpAMD64TESTQconst {
31130 v_0 := b.Controls[0]
31131 c := auxIntToInt32(v_0.AuxInt)
31132 x := v_0.Args[0]
31133 if !(isUint64PowerOfTwo(int64(c))) {
31134 break
31135 }
31136 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31137 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31138 v0.AddArg(x)
31139 b.resetWithControl(BlockAMD64ULT, v0)
31140 return true
31141 }
// match: (NE (TESTQ (MOVQconst [c]) x) yes no)
// cond: isUint64PowerOfTwo(c)
// result: (ULT (BTQconst [int8(log64(c))] x) yes no)
31145 for b.Controls[0].Op == OpAMD64TESTQ {
31146 v_0 := b.Controls[0]
31147 _ = v_0.Args[1]
31148 v_0_0 := v_0.Args[0]
31149 v_0_1 := v_0.Args[1]
31150 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31151 if v_0_0.Op != OpAMD64MOVQconst {
31152 continue
31153 }
31154 c := auxIntToInt64(v_0_0.AuxInt)
31155 x := v_0_1
31156 if !(isUint64PowerOfTwo(c)) {
31157 continue
31158 }
31159 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31160 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
31161 v0.AddArg(x)
31162 b.resetWithControl(BlockAMD64ULT, v0)
31163 return true
31164 }
31165 break
31166 }
// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [63] x) yes no)
31170 for b.Controls[0].Op == OpAMD64TESTQ {
31171 v_0 := b.Controls[0]
31172 _ = v_0.Args[1]
31173 v_0_0 := v_0.Args[0]
31174 v_0_1 := v_0.Args[1]
31175 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31176 z1 := v_0_0
31177 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
31178 continue
31179 }
31180 z1_0 := z1.Args[0]
31181 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31182 continue
31183 }
31184 x := z1_0.Args[0]
31185 z2 := v_0_1
31186 if !(z1 == z2) {
31187 continue
31188 }
31189 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31190 v0.AuxInt = int8ToAuxInt(63)
31191 v0.AddArg(x)
31192 b.resetWithControl(BlockAMD64ULT, v0)
31193 return true
31194 }
31195 break
31196 }
// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [31] x) yes no)
31200 for b.Controls[0].Op == OpAMD64TESTL {
31201 v_0 := b.Controls[0]
31202 _ = v_0.Args[1]
31203 v_0_0 := v_0.Args[0]
31204 v_0_1 := v_0.Args[1]
31205 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31206 z1 := v_0_0
31207 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
31208 continue
31209 }
31210 z1_0 := z1.Args[0]
31211 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31212 continue
31213 }
31214 x := z1_0.Args[0]
31215 z2 := v_0_1
31216 if !(z1 == z2) {
31217 continue
31218 }
31219 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31220 v0.AuxInt = int8ToAuxInt(31)
31221 v0.AddArg(x)
31222 b.resetWithControl(BlockAMD64ULT, v0)
31223 return true
31224 }
31225 break
31226 }
// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [0] x) yes no)
31230 for b.Controls[0].Op == OpAMD64TESTQ {
31231 v_0 := b.Controls[0]
31232 _ = v_0.Args[1]
31233 v_0_0 := v_0.Args[0]
31234 v_0_1 := v_0.Args[1]
31235 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31236 z1 := v_0_0
31237 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31238 continue
31239 }
31240 z1_0 := z1.Args[0]
31241 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31242 continue
31243 }
31244 x := z1_0.Args[0]
31245 z2 := v_0_1
31246 if !(z1 == z2) {
31247 continue
31248 }
31249 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31250 v0.AuxInt = int8ToAuxInt(0)
31251 v0.AddArg(x)
31252 b.resetWithControl(BlockAMD64ULT, v0)
31253 return true
31254 }
31255 break
31256 }
// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTLconst [0] x) yes no)
31260 for b.Controls[0].Op == OpAMD64TESTL {
31261 v_0 := b.Controls[0]
31262 _ = v_0.Args[1]
31263 v_0_0 := v_0.Args[0]
31264 v_0_1 := v_0.Args[1]
31265 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31266 z1 := v_0_0
31267 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31268 continue
31269 }
31270 z1_0 := z1.Args[0]
31271 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31272 continue
31273 }
31274 x := z1_0.Args[0]
31275 z2 := v_0_1
31276 if !(z1 == z2) {
31277 continue
31278 }
31279 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31280 v0.AuxInt = int8ToAuxInt(0)
31281 v0.AddArg(x)
31282 b.resetWithControl(BlockAMD64ULT, v0)
31283 return true
31284 }
31285 break
31286 }
// match: (NE (TESTQ z1:(SHRQconst [63] x) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [63] x) yes no)
31290 for b.Controls[0].Op == OpAMD64TESTQ {
31291 v_0 := b.Controls[0]
31292 _ = v_0.Args[1]
31293 v_0_0 := v_0.Args[0]
31294 v_0_1 := v_0.Args[1]
31295 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31296 z1 := v_0_0
31297 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31298 continue
31299 }
31300 x := z1.Args[0]
31301 z2 := v_0_1
31302 if !(z1 == z2) {
31303 continue
31304 }
31305 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31306 v0.AuxInt = int8ToAuxInt(63)
31307 v0.AddArg(x)
31308 b.resetWithControl(BlockAMD64ULT, v0)
31309 return true
31310 }
31311 break
31312 }
// match: (NE (TESTL z1:(SHRLconst [31] x) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTLconst [31] x) yes no)
31316 for b.Controls[0].Op == OpAMD64TESTL {
31317 v_0 := b.Controls[0]
31318 _ = v_0.Args[1]
31319 v_0_0 := v_0.Args[0]
31320 v_0_1 := v_0.Args[1]
31321 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31322 z1 := v_0_0
31323 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31324 continue
31325 }
31326 x := z1.Args[0]
31327 z2 := v_0_1
31328 if !(z1 == z2) {
31329 continue
31330 }
31331 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31332 v0.AuxInt = int8ToAuxInt(31)
31333 v0.AddArg(x)
31334 b.resetWithControl(BlockAMD64ULT, v0)
31335 return true
31336 }
31337 break
31338 }
// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
// result: (UGT cmp yes no)
31341 for b.Controls[0].Op == OpAMD64TESTB {
31342 v_0 := b.Controls[0]
31343 _ = v_0.Args[1]
31344 v_0_0 := v_0.Args[0]
31345 if v_0_0.Op != OpAMD64SETGF {
31346 break
31347 }
31348 cmp := v_0_0.Args[0]
31349 v_0_1 := v_0.Args[1]
31350 if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
31351 break
31352 }
31353 b.resetWithControl(BlockAMD64UGT, cmp)
31354 return true
31355 }
// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
// result: (UGE cmp yes no)
31358 for b.Controls[0].Op == OpAMD64TESTB {
31359 v_0 := b.Controls[0]
31360 _ = v_0.Args[1]
31361 v_0_0 := v_0.Args[0]
31362 if v_0_0.Op != OpAMD64SETGEF {
31363 break
31364 }
31365 cmp := v_0_0.Args[0]
31366 v_0_1 := v_0.Args[1]
31367 if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
31368 break
31369 }
31370 b.resetWithControl(BlockAMD64UGE, cmp)
31371 return true
31372 }
// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
// result: (EQF cmp yes no)
31375 for b.Controls[0].Op == OpAMD64TESTB {
31376 v_0 := b.Controls[0]
31377 _ = v_0.Args[1]
31378 v_0_0 := v_0.Args[0]
31379 if v_0_0.Op != OpAMD64SETEQF {
31380 break
31381 }
31382 cmp := v_0_0.Args[0]
31383 v_0_1 := v_0.Args[1]
31384 if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
31385 break
31386 }
31387 b.resetWithControl(BlockAMD64EQF, cmp)
31388 return true
31389 }
// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
// result: (NEF cmp yes no)
31392 for b.Controls[0].Op == OpAMD64TESTB {
31393 v_0 := b.Controls[0]
31394 _ = v_0.Args[1]
31395 v_0_0 := v_0.Args[0]
31396 if v_0_0.Op != OpAMD64SETNEF {
31397 break
31398 }
31399 cmp := v_0_0.Args[0]
31400 v_0_1 := v_0.Args[1]
31401 if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
31402 break
31403 }
31404 b.resetWithControl(BlockAMD64NEF, cmp)
31405 return true
31406 }
// match: (NE (InvertFlags cmp) yes no)
// result: (NE cmp yes no)
31409 for b.Controls[0].Op == OpAMD64InvertFlags {
31410 v_0 := b.Controls[0]
31411 cmp := v_0.Args[0]
31412 b.resetWithControl(BlockAMD64NE, cmp)
31413 return true
31414 }
// match: (NE (FlagEQ) yes no)
// result: (First no yes)
31417 for b.Controls[0].Op == OpAMD64FlagEQ {
31418 b.Reset(BlockFirst)
31419 b.swapSuccessors()
31420 return true
31421 }
// match: (NE (FlagLT_ULT) yes no)
// result: (First yes no)
31424 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31425 b.Reset(BlockFirst)
31426 return true
31427 }
// match: (NE (FlagLT_UGT) yes no)
// result: (First yes no)
31430 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31431 b.Reset(BlockFirst)
31432 return true
31433 }
// match: (NE (FlagGT_ULT) yes no)
// result: (First yes no)
31436 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31437 b.Reset(BlockFirst)
31438 return true
31439 }
// match: (NE (FlagGT_UGT) yes no)
// result: (First yes no)
31442 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31443 b.Reset(BlockFirst)
31444 return true
31445 }
// match: (NE (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
// result: (NE (Select1 <types.TypeFlags> blsr) yes no)
31448 for b.Controls[0].Op == OpAMD64TESTQ {
31449 v_0 := b.Controls[0]
31450 _ = v_0.Args[1]
31451 v_0_0 := v_0.Args[0]
31452 v_0_1 := v_0.Args[1]
31453 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31454 s := v_0_0
31455 if s.Op != OpSelect0 {
31456 continue
31457 }
31458 blsr := s.Args[0]
31459 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
31460 continue
31461 }
31462 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31463 v0.AddArg(blsr)
31464 b.resetWithControl(BlockAMD64NE, v0)
31465 return true
31466 }
31467 break
31468 }
// match: (NE (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
// result: (NE (Select1 <types.TypeFlags> blsr) yes no)
31471 for b.Controls[0].Op == OpAMD64TESTL {
31472 v_0 := b.Controls[0]
31473 _ = v_0.Args[1]
31474 v_0_0 := v_0.Args[0]
31475 v_0_1 := v_0.Args[1]
31476 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31477 s := v_0_0
31478 if s.Op != OpSelect0 {
31479 continue
31480 }
31481 blsr := s.Args[0]
31482 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
31483 continue
31484 }
31485 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31486 v0.AddArg(blsr)
31487 b.resetWithControl(BlockAMD64NE, v0)
31488 return true
31489 }
31490 break
31491 }
31492 case BlockAMD64UGE:
// match: (UGE (TESTQ x x) yes no)
// result: (First yes no)
31495 for b.Controls[0].Op == OpAMD64TESTQ {
31496 v_0 := b.Controls[0]
31497 x := v_0.Args[1]
31498 if x != v_0.Args[0] {
31499 break
31500 }
31501 b.Reset(BlockFirst)
31502 return true
31503 }
// match: (UGE (TESTL x x) yes no)
// result: (First yes no)
31506 for b.Controls[0].Op == OpAMD64TESTL {
31507 v_0 := b.Controls[0]
31508 x := v_0.Args[1]
31509 if x != v_0.Args[0] {
31510 break
31511 }
31512 b.Reset(BlockFirst)
31513 return true
31514 }
// match: (UGE (TESTW x x) yes no)
// result: (First yes no)
31517 for b.Controls[0].Op == OpAMD64TESTW {
31518 v_0 := b.Controls[0]
31519 x := v_0.Args[1]
31520 if x != v_0.Args[0] {
31521 break
31522 }
31523 b.Reset(BlockFirst)
31524 return true
31525 }
// match: (UGE (TESTB x x) yes no)
// result: (First yes no)
31528 for b.Controls[0].Op == OpAMD64TESTB {
31529 v_0 := b.Controls[0]
31530 x := v_0.Args[1]
31531 if x != v_0.Args[0] {
31532 break
31533 }
31534 b.Reset(BlockFirst)
31535 return true
31536 }
// match: (UGE (InvertFlags cmp) yes no)
// result: (ULE cmp yes no)
31539 for b.Controls[0].Op == OpAMD64InvertFlags {
31540 v_0 := b.Controls[0]
31541 cmp := v_0.Args[0]
31542 b.resetWithControl(BlockAMD64ULE, cmp)
31543 return true
31544 }
// match: (UGE (FlagEQ) yes no)
// result: (First yes no)
31547 for b.Controls[0].Op == OpAMD64FlagEQ {
31548 b.Reset(BlockFirst)
31549 return true
31550 }
// match: (UGE (FlagLT_ULT) yes no)
// result: (First no yes)
31553 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31554 b.Reset(BlockFirst)
31555 b.swapSuccessors()
31556 return true
31557 }
// match: (UGE (FlagLT_UGT) yes no)
// result: (First yes no)
31560 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31561 b.Reset(BlockFirst)
31562 return true
31563 }
// match: (UGE (FlagGT_ULT) yes no)
// result: (First no yes)
31566 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31567 b.Reset(BlockFirst)
31568 b.swapSuccessors()
31569 return true
31570 }
// match: (UGE (FlagGT_UGT) yes no)
// result: (First yes no)
31573 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31574 b.Reset(BlockFirst)
31575 return true
31576 }
31577 case BlockAMD64UGT:
// match: (UGT (InvertFlags cmp) yes no)
// result: (ULT cmp yes no)
31580 for b.Controls[0].Op == OpAMD64InvertFlags {
31581 v_0 := b.Controls[0]
31582 cmp := v_0.Args[0]
31583 b.resetWithControl(BlockAMD64ULT, cmp)
31584 return true
31585 }
// match: (UGT (FlagEQ) yes no)
// result: (First no yes)
31588 for b.Controls[0].Op == OpAMD64FlagEQ {
31589 b.Reset(BlockFirst)
31590 b.swapSuccessors()
31591 return true
31592 }
// match: (UGT (FlagLT_ULT) yes no)
// result: (First no yes)
31595 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31596 b.Reset(BlockFirst)
31597 b.swapSuccessors()
31598 return true
31599 }
// match: (UGT (FlagLT_UGT) yes no)
// result: (First yes no)
31602 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31603 b.Reset(BlockFirst)
31604 return true
31605 }
// match: (UGT (FlagGT_ULT) yes no)
// result: (First no yes)
31608 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31609 b.Reset(BlockFirst)
31610 b.swapSuccessors()
31611 return true
31612 }
// match: (UGT (FlagGT_UGT) yes no)
// result: (First yes no)
31615 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31616 b.Reset(BlockFirst)
31617 return true
31618 }
31619 case BlockAMD64ULE:
// match: (ULE (InvertFlags cmp) yes no)
// result: (UGE cmp yes no)
31622 for b.Controls[0].Op == OpAMD64InvertFlags {
31623 v_0 := b.Controls[0]
31624 cmp := v_0.Args[0]
31625 b.resetWithControl(BlockAMD64UGE, cmp)
31626 return true
31627 }
// match: (ULE (FlagEQ) yes no)
// result: (First yes no)
31630 for b.Controls[0].Op == OpAMD64FlagEQ {
31631 b.Reset(BlockFirst)
31632 return true
31633 }
// match: (ULE (FlagLT_ULT) yes no)
// result: (First yes no)
31636 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31637 b.Reset(BlockFirst)
31638 return true
31639 }
// match: (ULE (FlagLT_UGT) yes no)
// result: (First no yes)
31642 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31643 b.Reset(BlockFirst)
31644 b.swapSuccessors()
31645 return true
31646 }
// match: (ULE (FlagGT_ULT) yes no)
// result: (First yes no)
31649 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31650 b.Reset(BlockFirst)
31651 return true
31652 }
// match: (ULE (FlagGT_UGT) yes no)
// result: (First no yes)
31655 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31656 b.Reset(BlockFirst)
31657 b.swapSuccessors()
31658 return true
31659 }
31660 case BlockAMD64ULT:
// match: (ULT (TESTQ x x) yes no)
// result: (First no yes)
31663 for b.Controls[0].Op == OpAMD64TESTQ {
31664 v_0 := b.Controls[0]
31665 x := v_0.Args[1]
31666 if x != v_0.Args[0] {
31667 break
31668 }
31669 b.Reset(BlockFirst)
31670 b.swapSuccessors()
31671 return true
31672 }
// match: (ULT (TESTL x x) yes no)
// result: (First no yes)
31675 for b.Controls[0].Op == OpAMD64TESTL {
31676 v_0 := b.Controls[0]
31677 x := v_0.Args[1]
31678 if x != v_0.Args[0] {
31679 break
31680 }
31681 b.Reset(BlockFirst)
31682 b.swapSuccessors()
31683 return true
31684 }
// match: (ULT (TESTW x x) yes no)
// result: (First no yes)
31687 for b.Controls[0].Op == OpAMD64TESTW {
31688 v_0 := b.Controls[0]
31689 x := v_0.Args[1]
31690 if x != v_0.Args[0] {
31691 break
31692 }
31693 b.Reset(BlockFirst)
31694 b.swapSuccessors()
31695 return true
31696 }
// match: (ULT (TESTB x x) yes no)
// result: (First no yes)
31699 for b.Controls[0].Op == OpAMD64TESTB {
31700 v_0 := b.Controls[0]
31701 x := v_0.Args[1]
31702 if x != v_0.Args[0] {
31703 break
31704 }
31705 b.Reset(BlockFirst)
31706 b.swapSuccessors()
31707 return true
31708 }
// match: (ULT (InvertFlags cmp) yes no)
// result: (UGT cmp yes no)
31711 for b.Controls[0].Op == OpAMD64InvertFlags {
31712 v_0 := b.Controls[0]
31713 cmp := v_0.Args[0]
31714 b.resetWithControl(BlockAMD64UGT, cmp)
31715 return true
31716 }
// match: (ULT (FlagEQ) yes no)
// result: (First no yes)
31719 for b.Controls[0].Op == OpAMD64FlagEQ {
31720 b.Reset(BlockFirst)
31721 b.swapSuccessors()
31722 return true
31723 }
// match: (ULT (FlagLT_ULT) yes no)
// result: (First yes no)
31726 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31727 b.Reset(BlockFirst)
31728 return true
31729 }
// match: (ULT (FlagLT_UGT) yes no)
// result: (First no yes)
31732 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31733 b.Reset(BlockFirst)
31734 b.swapSuccessors()
31735 return true
31736 }
// match: (ULT (FlagGT_ULT) yes no)
// result: (First yes no)
31739 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31740 b.Reset(BlockFirst)
31741 return true
31742 }
// match: (ULT (FlagGT_UGT) yes no)
// result: (First no yes)
31745 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31746 b.Reset(BlockFirst)
31747 b.swapSuccessors()
31748 return true
31749 }
31750 }
31751 return false
31752 }