1
2
3 package ssa
4
5 import "internal/buildcfg"
6 import "math"
7 import "cmd/internal/obj"
8 import "cmd/compile/internal/types"
9
10 func rewriteValueAMD64(v *Value) bool {
11 switch v.Op {
12 case OpAMD64ADCQ:
13 return rewriteValueAMD64_OpAMD64ADCQ(v)
14 case OpAMD64ADCQconst:
15 return rewriteValueAMD64_OpAMD64ADCQconst(v)
16 case OpAMD64ADDL:
17 return rewriteValueAMD64_OpAMD64ADDL(v)
18 case OpAMD64ADDLconst:
19 return rewriteValueAMD64_OpAMD64ADDLconst(v)
20 case OpAMD64ADDLconstmodify:
21 return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
22 case OpAMD64ADDLload:
23 return rewriteValueAMD64_OpAMD64ADDLload(v)
24 case OpAMD64ADDLmodify:
25 return rewriteValueAMD64_OpAMD64ADDLmodify(v)
26 case OpAMD64ADDQ:
27 return rewriteValueAMD64_OpAMD64ADDQ(v)
28 case OpAMD64ADDQcarry:
29 return rewriteValueAMD64_OpAMD64ADDQcarry(v)
30 case OpAMD64ADDQconst:
31 return rewriteValueAMD64_OpAMD64ADDQconst(v)
32 case OpAMD64ADDQconstmodify:
33 return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
34 case OpAMD64ADDQload:
35 return rewriteValueAMD64_OpAMD64ADDQload(v)
36 case OpAMD64ADDQmodify:
37 return rewriteValueAMD64_OpAMD64ADDQmodify(v)
38 case OpAMD64ADDSD:
39 return rewriteValueAMD64_OpAMD64ADDSD(v)
40 case OpAMD64ADDSDload:
41 return rewriteValueAMD64_OpAMD64ADDSDload(v)
42 case OpAMD64ADDSS:
43 return rewriteValueAMD64_OpAMD64ADDSS(v)
44 case OpAMD64ADDSSload:
45 return rewriteValueAMD64_OpAMD64ADDSSload(v)
46 case OpAMD64ANDL:
47 return rewriteValueAMD64_OpAMD64ANDL(v)
48 case OpAMD64ANDLconst:
49 return rewriteValueAMD64_OpAMD64ANDLconst(v)
50 case OpAMD64ANDLconstmodify:
51 return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
52 case OpAMD64ANDLload:
53 return rewriteValueAMD64_OpAMD64ANDLload(v)
54 case OpAMD64ANDLmodify:
55 return rewriteValueAMD64_OpAMD64ANDLmodify(v)
56 case OpAMD64ANDNL:
57 return rewriteValueAMD64_OpAMD64ANDNL(v)
58 case OpAMD64ANDNQ:
59 return rewriteValueAMD64_OpAMD64ANDNQ(v)
60 case OpAMD64ANDQ:
61 return rewriteValueAMD64_OpAMD64ANDQ(v)
62 case OpAMD64ANDQconst:
63 return rewriteValueAMD64_OpAMD64ANDQconst(v)
64 case OpAMD64ANDQconstmodify:
65 return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
66 case OpAMD64ANDQload:
67 return rewriteValueAMD64_OpAMD64ANDQload(v)
68 case OpAMD64ANDQmodify:
69 return rewriteValueAMD64_OpAMD64ANDQmodify(v)
70 case OpAMD64BSFQ:
71 return rewriteValueAMD64_OpAMD64BSFQ(v)
72 case OpAMD64BSWAPL:
73 return rewriteValueAMD64_OpAMD64BSWAPL(v)
74 case OpAMD64BSWAPQ:
75 return rewriteValueAMD64_OpAMD64BSWAPQ(v)
76 case OpAMD64BTCQconst:
77 return rewriteValueAMD64_OpAMD64BTCQconst(v)
78 case OpAMD64BTLconst:
79 return rewriteValueAMD64_OpAMD64BTLconst(v)
80 case OpAMD64BTQconst:
81 return rewriteValueAMD64_OpAMD64BTQconst(v)
82 case OpAMD64BTRQconst:
83 return rewriteValueAMD64_OpAMD64BTRQconst(v)
84 case OpAMD64BTSQconst:
85 return rewriteValueAMD64_OpAMD64BTSQconst(v)
86 case OpAMD64CMOVLCC:
87 return rewriteValueAMD64_OpAMD64CMOVLCC(v)
88 case OpAMD64CMOVLCS:
89 return rewriteValueAMD64_OpAMD64CMOVLCS(v)
90 case OpAMD64CMOVLEQ:
91 return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
92 case OpAMD64CMOVLGE:
93 return rewriteValueAMD64_OpAMD64CMOVLGE(v)
94 case OpAMD64CMOVLGT:
95 return rewriteValueAMD64_OpAMD64CMOVLGT(v)
96 case OpAMD64CMOVLHI:
97 return rewriteValueAMD64_OpAMD64CMOVLHI(v)
98 case OpAMD64CMOVLLE:
99 return rewriteValueAMD64_OpAMD64CMOVLLE(v)
100 case OpAMD64CMOVLLS:
101 return rewriteValueAMD64_OpAMD64CMOVLLS(v)
102 case OpAMD64CMOVLLT:
103 return rewriteValueAMD64_OpAMD64CMOVLLT(v)
104 case OpAMD64CMOVLNE:
105 return rewriteValueAMD64_OpAMD64CMOVLNE(v)
106 case OpAMD64CMOVQCC:
107 return rewriteValueAMD64_OpAMD64CMOVQCC(v)
108 case OpAMD64CMOVQCS:
109 return rewriteValueAMD64_OpAMD64CMOVQCS(v)
110 case OpAMD64CMOVQEQ:
111 return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
112 case OpAMD64CMOVQGE:
113 return rewriteValueAMD64_OpAMD64CMOVQGE(v)
114 case OpAMD64CMOVQGT:
115 return rewriteValueAMD64_OpAMD64CMOVQGT(v)
116 case OpAMD64CMOVQHI:
117 return rewriteValueAMD64_OpAMD64CMOVQHI(v)
118 case OpAMD64CMOVQLE:
119 return rewriteValueAMD64_OpAMD64CMOVQLE(v)
120 case OpAMD64CMOVQLS:
121 return rewriteValueAMD64_OpAMD64CMOVQLS(v)
122 case OpAMD64CMOVQLT:
123 return rewriteValueAMD64_OpAMD64CMOVQLT(v)
124 case OpAMD64CMOVQNE:
125 return rewriteValueAMD64_OpAMD64CMOVQNE(v)
126 case OpAMD64CMOVWCC:
127 return rewriteValueAMD64_OpAMD64CMOVWCC(v)
128 case OpAMD64CMOVWCS:
129 return rewriteValueAMD64_OpAMD64CMOVWCS(v)
130 case OpAMD64CMOVWEQ:
131 return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
132 case OpAMD64CMOVWGE:
133 return rewriteValueAMD64_OpAMD64CMOVWGE(v)
134 case OpAMD64CMOVWGT:
135 return rewriteValueAMD64_OpAMD64CMOVWGT(v)
136 case OpAMD64CMOVWHI:
137 return rewriteValueAMD64_OpAMD64CMOVWHI(v)
138 case OpAMD64CMOVWLE:
139 return rewriteValueAMD64_OpAMD64CMOVWLE(v)
140 case OpAMD64CMOVWLS:
141 return rewriteValueAMD64_OpAMD64CMOVWLS(v)
142 case OpAMD64CMOVWLT:
143 return rewriteValueAMD64_OpAMD64CMOVWLT(v)
144 case OpAMD64CMOVWNE:
145 return rewriteValueAMD64_OpAMD64CMOVWNE(v)
146 case OpAMD64CMPB:
147 return rewriteValueAMD64_OpAMD64CMPB(v)
148 case OpAMD64CMPBconst:
149 return rewriteValueAMD64_OpAMD64CMPBconst(v)
150 case OpAMD64CMPBconstload:
151 return rewriteValueAMD64_OpAMD64CMPBconstload(v)
152 case OpAMD64CMPBload:
153 return rewriteValueAMD64_OpAMD64CMPBload(v)
154 case OpAMD64CMPL:
155 return rewriteValueAMD64_OpAMD64CMPL(v)
156 case OpAMD64CMPLconst:
157 return rewriteValueAMD64_OpAMD64CMPLconst(v)
158 case OpAMD64CMPLconstload:
159 return rewriteValueAMD64_OpAMD64CMPLconstload(v)
160 case OpAMD64CMPLload:
161 return rewriteValueAMD64_OpAMD64CMPLload(v)
162 case OpAMD64CMPQ:
163 return rewriteValueAMD64_OpAMD64CMPQ(v)
164 case OpAMD64CMPQconst:
165 return rewriteValueAMD64_OpAMD64CMPQconst(v)
166 case OpAMD64CMPQconstload:
167 return rewriteValueAMD64_OpAMD64CMPQconstload(v)
168 case OpAMD64CMPQload:
169 return rewriteValueAMD64_OpAMD64CMPQload(v)
170 case OpAMD64CMPW:
171 return rewriteValueAMD64_OpAMD64CMPW(v)
172 case OpAMD64CMPWconst:
173 return rewriteValueAMD64_OpAMD64CMPWconst(v)
174 case OpAMD64CMPWconstload:
175 return rewriteValueAMD64_OpAMD64CMPWconstload(v)
176 case OpAMD64CMPWload:
177 return rewriteValueAMD64_OpAMD64CMPWload(v)
178 case OpAMD64CMPXCHGLlock:
179 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
180 case OpAMD64CMPXCHGQlock:
181 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
182 case OpAMD64DIVSD:
183 return rewriteValueAMD64_OpAMD64DIVSD(v)
184 case OpAMD64DIVSDload:
185 return rewriteValueAMD64_OpAMD64DIVSDload(v)
186 case OpAMD64DIVSS:
187 return rewriteValueAMD64_OpAMD64DIVSS(v)
188 case OpAMD64DIVSSload:
189 return rewriteValueAMD64_OpAMD64DIVSSload(v)
190 case OpAMD64HMULL:
191 return rewriteValueAMD64_OpAMD64HMULL(v)
192 case OpAMD64HMULLU:
193 return rewriteValueAMD64_OpAMD64HMULLU(v)
194 case OpAMD64HMULQ:
195 return rewriteValueAMD64_OpAMD64HMULQ(v)
196 case OpAMD64HMULQU:
197 return rewriteValueAMD64_OpAMD64HMULQU(v)
198 case OpAMD64LEAL:
199 return rewriteValueAMD64_OpAMD64LEAL(v)
200 case OpAMD64LEAL1:
201 return rewriteValueAMD64_OpAMD64LEAL1(v)
202 case OpAMD64LEAL2:
203 return rewriteValueAMD64_OpAMD64LEAL2(v)
204 case OpAMD64LEAL4:
205 return rewriteValueAMD64_OpAMD64LEAL4(v)
206 case OpAMD64LEAL8:
207 return rewriteValueAMD64_OpAMD64LEAL8(v)
208 case OpAMD64LEAQ:
209 return rewriteValueAMD64_OpAMD64LEAQ(v)
210 case OpAMD64LEAQ1:
211 return rewriteValueAMD64_OpAMD64LEAQ1(v)
212 case OpAMD64LEAQ2:
213 return rewriteValueAMD64_OpAMD64LEAQ2(v)
214 case OpAMD64LEAQ4:
215 return rewriteValueAMD64_OpAMD64LEAQ4(v)
216 case OpAMD64LEAQ8:
217 return rewriteValueAMD64_OpAMD64LEAQ8(v)
218 case OpAMD64MOVBELstore:
219 return rewriteValueAMD64_OpAMD64MOVBELstore(v)
220 case OpAMD64MOVBEQstore:
221 return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
222 case OpAMD64MOVBEWstore:
223 return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
224 case OpAMD64MOVBQSX:
225 return rewriteValueAMD64_OpAMD64MOVBQSX(v)
226 case OpAMD64MOVBQSXload:
227 return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
228 case OpAMD64MOVBQZX:
229 return rewriteValueAMD64_OpAMD64MOVBQZX(v)
230 case OpAMD64MOVBatomicload:
231 return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
232 case OpAMD64MOVBload:
233 return rewriteValueAMD64_OpAMD64MOVBload(v)
234 case OpAMD64MOVBstore:
235 return rewriteValueAMD64_OpAMD64MOVBstore(v)
236 case OpAMD64MOVBstoreconst:
237 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
238 case OpAMD64MOVLQSX:
239 return rewriteValueAMD64_OpAMD64MOVLQSX(v)
240 case OpAMD64MOVLQSXload:
241 return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
242 case OpAMD64MOVLQZX:
243 return rewriteValueAMD64_OpAMD64MOVLQZX(v)
244 case OpAMD64MOVLatomicload:
245 return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
246 case OpAMD64MOVLf2i:
247 return rewriteValueAMD64_OpAMD64MOVLf2i(v)
248 case OpAMD64MOVLi2f:
249 return rewriteValueAMD64_OpAMD64MOVLi2f(v)
250 case OpAMD64MOVLload:
251 return rewriteValueAMD64_OpAMD64MOVLload(v)
252 case OpAMD64MOVLstore:
253 return rewriteValueAMD64_OpAMD64MOVLstore(v)
254 case OpAMD64MOVLstoreconst:
255 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
256 case OpAMD64MOVOload:
257 return rewriteValueAMD64_OpAMD64MOVOload(v)
258 case OpAMD64MOVOstore:
259 return rewriteValueAMD64_OpAMD64MOVOstore(v)
260 case OpAMD64MOVOstoreconst:
261 return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
262 case OpAMD64MOVQatomicload:
263 return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
264 case OpAMD64MOVQf2i:
265 return rewriteValueAMD64_OpAMD64MOVQf2i(v)
266 case OpAMD64MOVQi2f:
267 return rewriteValueAMD64_OpAMD64MOVQi2f(v)
268 case OpAMD64MOVQload:
269 return rewriteValueAMD64_OpAMD64MOVQload(v)
270 case OpAMD64MOVQstore:
271 return rewriteValueAMD64_OpAMD64MOVQstore(v)
272 case OpAMD64MOVQstoreconst:
273 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
274 case OpAMD64MOVSDload:
275 return rewriteValueAMD64_OpAMD64MOVSDload(v)
276 case OpAMD64MOVSDstore:
277 return rewriteValueAMD64_OpAMD64MOVSDstore(v)
278 case OpAMD64MOVSSload:
279 return rewriteValueAMD64_OpAMD64MOVSSload(v)
280 case OpAMD64MOVSSstore:
281 return rewriteValueAMD64_OpAMD64MOVSSstore(v)
282 case OpAMD64MOVWQSX:
283 return rewriteValueAMD64_OpAMD64MOVWQSX(v)
284 case OpAMD64MOVWQSXload:
285 return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
286 case OpAMD64MOVWQZX:
287 return rewriteValueAMD64_OpAMD64MOVWQZX(v)
288 case OpAMD64MOVWload:
289 return rewriteValueAMD64_OpAMD64MOVWload(v)
290 case OpAMD64MOVWstore:
291 return rewriteValueAMD64_OpAMD64MOVWstore(v)
292 case OpAMD64MOVWstoreconst:
293 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
294 case OpAMD64MULL:
295 return rewriteValueAMD64_OpAMD64MULL(v)
296 case OpAMD64MULLconst:
297 return rewriteValueAMD64_OpAMD64MULLconst(v)
298 case OpAMD64MULQ:
299 return rewriteValueAMD64_OpAMD64MULQ(v)
300 case OpAMD64MULQconst:
301 return rewriteValueAMD64_OpAMD64MULQconst(v)
302 case OpAMD64MULSD:
303 return rewriteValueAMD64_OpAMD64MULSD(v)
304 case OpAMD64MULSDload:
305 return rewriteValueAMD64_OpAMD64MULSDload(v)
306 case OpAMD64MULSS:
307 return rewriteValueAMD64_OpAMD64MULSS(v)
308 case OpAMD64MULSSload:
309 return rewriteValueAMD64_OpAMD64MULSSload(v)
310 case OpAMD64NEGL:
311 return rewriteValueAMD64_OpAMD64NEGL(v)
312 case OpAMD64NEGQ:
313 return rewriteValueAMD64_OpAMD64NEGQ(v)
314 case OpAMD64NOTL:
315 return rewriteValueAMD64_OpAMD64NOTL(v)
316 case OpAMD64NOTQ:
317 return rewriteValueAMD64_OpAMD64NOTQ(v)
318 case OpAMD64ORL:
319 return rewriteValueAMD64_OpAMD64ORL(v)
320 case OpAMD64ORLconst:
321 return rewriteValueAMD64_OpAMD64ORLconst(v)
322 case OpAMD64ORLconstmodify:
323 return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
324 case OpAMD64ORLload:
325 return rewriteValueAMD64_OpAMD64ORLload(v)
326 case OpAMD64ORLmodify:
327 return rewriteValueAMD64_OpAMD64ORLmodify(v)
328 case OpAMD64ORQ:
329 return rewriteValueAMD64_OpAMD64ORQ(v)
330 case OpAMD64ORQconst:
331 return rewriteValueAMD64_OpAMD64ORQconst(v)
332 case OpAMD64ORQconstmodify:
333 return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
334 case OpAMD64ORQload:
335 return rewriteValueAMD64_OpAMD64ORQload(v)
336 case OpAMD64ORQmodify:
337 return rewriteValueAMD64_OpAMD64ORQmodify(v)
338 case OpAMD64ROLB:
339 return rewriteValueAMD64_OpAMD64ROLB(v)
340 case OpAMD64ROLBconst:
341 return rewriteValueAMD64_OpAMD64ROLBconst(v)
342 case OpAMD64ROLL:
343 return rewriteValueAMD64_OpAMD64ROLL(v)
344 case OpAMD64ROLLconst:
345 return rewriteValueAMD64_OpAMD64ROLLconst(v)
346 case OpAMD64ROLQ:
347 return rewriteValueAMD64_OpAMD64ROLQ(v)
348 case OpAMD64ROLQconst:
349 return rewriteValueAMD64_OpAMD64ROLQconst(v)
350 case OpAMD64ROLW:
351 return rewriteValueAMD64_OpAMD64ROLW(v)
352 case OpAMD64ROLWconst:
353 return rewriteValueAMD64_OpAMD64ROLWconst(v)
354 case OpAMD64RORB:
355 return rewriteValueAMD64_OpAMD64RORB(v)
356 case OpAMD64RORL:
357 return rewriteValueAMD64_OpAMD64RORL(v)
358 case OpAMD64RORQ:
359 return rewriteValueAMD64_OpAMD64RORQ(v)
360 case OpAMD64RORW:
361 return rewriteValueAMD64_OpAMD64RORW(v)
362 case OpAMD64SARB:
363 return rewriteValueAMD64_OpAMD64SARB(v)
364 case OpAMD64SARBconst:
365 return rewriteValueAMD64_OpAMD64SARBconst(v)
366 case OpAMD64SARL:
367 return rewriteValueAMD64_OpAMD64SARL(v)
368 case OpAMD64SARLconst:
369 return rewriteValueAMD64_OpAMD64SARLconst(v)
370 case OpAMD64SARQ:
371 return rewriteValueAMD64_OpAMD64SARQ(v)
372 case OpAMD64SARQconst:
373 return rewriteValueAMD64_OpAMD64SARQconst(v)
374 case OpAMD64SARW:
375 return rewriteValueAMD64_OpAMD64SARW(v)
376 case OpAMD64SARWconst:
377 return rewriteValueAMD64_OpAMD64SARWconst(v)
378 case OpAMD64SARXLload:
379 return rewriteValueAMD64_OpAMD64SARXLload(v)
380 case OpAMD64SARXQload:
381 return rewriteValueAMD64_OpAMD64SARXQload(v)
382 case OpAMD64SBBLcarrymask:
383 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
384 case OpAMD64SBBQ:
385 return rewriteValueAMD64_OpAMD64SBBQ(v)
386 case OpAMD64SBBQcarrymask:
387 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
388 case OpAMD64SBBQconst:
389 return rewriteValueAMD64_OpAMD64SBBQconst(v)
390 case OpAMD64SETA:
391 return rewriteValueAMD64_OpAMD64SETA(v)
392 case OpAMD64SETAE:
393 return rewriteValueAMD64_OpAMD64SETAE(v)
394 case OpAMD64SETAEstore:
395 return rewriteValueAMD64_OpAMD64SETAEstore(v)
396 case OpAMD64SETAstore:
397 return rewriteValueAMD64_OpAMD64SETAstore(v)
398 case OpAMD64SETB:
399 return rewriteValueAMD64_OpAMD64SETB(v)
400 case OpAMD64SETBE:
401 return rewriteValueAMD64_OpAMD64SETBE(v)
402 case OpAMD64SETBEstore:
403 return rewriteValueAMD64_OpAMD64SETBEstore(v)
404 case OpAMD64SETBstore:
405 return rewriteValueAMD64_OpAMD64SETBstore(v)
406 case OpAMD64SETEQ:
407 return rewriteValueAMD64_OpAMD64SETEQ(v)
408 case OpAMD64SETEQstore:
409 return rewriteValueAMD64_OpAMD64SETEQstore(v)
410 case OpAMD64SETG:
411 return rewriteValueAMD64_OpAMD64SETG(v)
412 case OpAMD64SETGE:
413 return rewriteValueAMD64_OpAMD64SETGE(v)
414 case OpAMD64SETGEstore:
415 return rewriteValueAMD64_OpAMD64SETGEstore(v)
416 case OpAMD64SETGstore:
417 return rewriteValueAMD64_OpAMD64SETGstore(v)
418 case OpAMD64SETL:
419 return rewriteValueAMD64_OpAMD64SETL(v)
420 case OpAMD64SETLE:
421 return rewriteValueAMD64_OpAMD64SETLE(v)
422 case OpAMD64SETLEstore:
423 return rewriteValueAMD64_OpAMD64SETLEstore(v)
424 case OpAMD64SETLstore:
425 return rewriteValueAMD64_OpAMD64SETLstore(v)
426 case OpAMD64SETNE:
427 return rewriteValueAMD64_OpAMD64SETNE(v)
428 case OpAMD64SETNEstore:
429 return rewriteValueAMD64_OpAMD64SETNEstore(v)
430 case OpAMD64SHLL:
431 return rewriteValueAMD64_OpAMD64SHLL(v)
432 case OpAMD64SHLLconst:
433 return rewriteValueAMD64_OpAMD64SHLLconst(v)
434 case OpAMD64SHLQ:
435 return rewriteValueAMD64_OpAMD64SHLQ(v)
436 case OpAMD64SHLQconst:
437 return rewriteValueAMD64_OpAMD64SHLQconst(v)
438 case OpAMD64SHLXLload:
439 return rewriteValueAMD64_OpAMD64SHLXLload(v)
440 case OpAMD64SHLXQload:
441 return rewriteValueAMD64_OpAMD64SHLXQload(v)
442 case OpAMD64SHRB:
443 return rewriteValueAMD64_OpAMD64SHRB(v)
444 case OpAMD64SHRBconst:
445 return rewriteValueAMD64_OpAMD64SHRBconst(v)
446 case OpAMD64SHRL:
447 return rewriteValueAMD64_OpAMD64SHRL(v)
448 case OpAMD64SHRLconst:
449 return rewriteValueAMD64_OpAMD64SHRLconst(v)
450 case OpAMD64SHRQ:
451 return rewriteValueAMD64_OpAMD64SHRQ(v)
452 case OpAMD64SHRQconst:
453 return rewriteValueAMD64_OpAMD64SHRQconst(v)
454 case OpAMD64SHRW:
455 return rewriteValueAMD64_OpAMD64SHRW(v)
456 case OpAMD64SHRWconst:
457 return rewriteValueAMD64_OpAMD64SHRWconst(v)
458 case OpAMD64SHRXLload:
459 return rewriteValueAMD64_OpAMD64SHRXLload(v)
460 case OpAMD64SHRXQload:
461 return rewriteValueAMD64_OpAMD64SHRXQload(v)
462 case OpAMD64SUBL:
463 return rewriteValueAMD64_OpAMD64SUBL(v)
464 case OpAMD64SUBLconst:
465 return rewriteValueAMD64_OpAMD64SUBLconst(v)
466 case OpAMD64SUBLload:
467 return rewriteValueAMD64_OpAMD64SUBLload(v)
468 case OpAMD64SUBLmodify:
469 return rewriteValueAMD64_OpAMD64SUBLmodify(v)
470 case OpAMD64SUBQ:
471 return rewriteValueAMD64_OpAMD64SUBQ(v)
472 case OpAMD64SUBQborrow:
473 return rewriteValueAMD64_OpAMD64SUBQborrow(v)
474 case OpAMD64SUBQconst:
475 return rewriteValueAMD64_OpAMD64SUBQconst(v)
476 case OpAMD64SUBQload:
477 return rewriteValueAMD64_OpAMD64SUBQload(v)
478 case OpAMD64SUBQmodify:
479 return rewriteValueAMD64_OpAMD64SUBQmodify(v)
480 case OpAMD64SUBSD:
481 return rewriteValueAMD64_OpAMD64SUBSD(v)
482 case OpAMD64SUBSDload:
483 return rewriteValueAMD64_OpAMD64SUBSDload(v)
484 case OpAMD64SUBSS:
485 return rewriteValueAMD64_OpAMD64SUBSS(v)
486 case OpAMD64SUBSSload:
487 return rewriteValueAMD64_OpAMD64SUBSSload(v)
488 case OpAMD64TESTB:
489 return rewriteValueAMD64_OpAMD64TESTB(v)
490 case OpAMD64TESTBconst:
491 return rewriteValueAMD64_OpAMD64TESTBconst(v)
492 case OpAMD64TESTL:
493 return rewriteValueAMD64_OpAMD64TESTL(v)
494 case OpAMD64TESTLconst:
495 return rewriteValueAMD64_OpAMD64TESTLconst(v)
496 case OpAMD64TESTQ:
497 return rewriteValueAMD64_OpAMD64TESTQ(v)
498 case OpAMD64TESTQconst:
499 return rewriteValueAMD64_OpAMD64TESTQconst(v)
500 case OpAMD64TESTW:
501 return rewriteValueAMD64_OpAMD64TESTW(v)
502 case OpAMD64TESTWconst:
503 return rewriteValueAMD64_OpAMD64TESTWconst(v)
504 case OpAMD64XADDLlock:
505 return rewriteValueAMD64_OpAMD64XADDLlock(v)
506 case OpAMD64XADDQlock:
507 return rewriteValueAMD64_OpAMD64XADDQlock(v)
508 case OpAMD64XCHGL:
509 return rewriteValueAMD64_OpAMD64XCHGL(v)
510 case OpAMD64XCHGQ:
511 return rewriteValueAMD64_OpAMD64XCHGQ(v)
512 case OpAMD64XORL:
513 return rewriteValueAMD64_OpAMD64XORL(v)
514 case OpAMD64XORLconst:
515 return rewriteValueAMD64_OpAMD64XORLconst(v)
516 case OpAMD64XORLconstmodify:
517 return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
518 case OpAMD64XORLload:
519 return rewriteValueAMD64_OpAMD64XORLload(v)
520 case OpAMD64XORLmodify:
521 return rewriteValueAMD64_OpAMD64XORLmodify(v)
522 case OpAMD64XORQ:
523 return rewriteValueAMD64_OpAMD64XORQ(v)
524 case OpAMD64XORQconst:
525 return rewriteValueAMD64_OpAMD64XORQconst(v)
526 case OpAMD64XORQconstmodify:
527 return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
528 case OpAMD64XORQload:
529 return rewriteValueAMD64_OpAMD64XORQload(v)
530 case OpAMD64XORQmodify:
531 return rewriteValueAMD64_OpAMD64XORQmodify(v)
532 case OpAdd16:
533 v.Op = OpAMD64ADDL
534 return true
535 case OpAdd32:
536 v.Op = OpAMD64ADDL
537 return true
538 case OpAdd32F:
539 v.Op = OpAMD64ADDSS
540 return true
541 case OpAdd64:
542 v.Op = OpAMD64ADDQ
543 return true
544 case OpAdd64F:
545 v.Op = OpAMD64ADDSD
546 return true
547 case OpAdd8:
548 v.Op = OpAMD64ADDL
549 return true
550 case OpAddPtr:
551 v.Op = OpAMD64ADDQ
552 return true
553 case OpAddr:
554 return rewriteValueAMD64_OpAddr(v)
555 case OpAnd16:
556 v.Op = OpAMD64ANDL
557 return true
558 case OpAnd32:
559 v.Op = OpAMD64ANDL
560 return true
561 case OpAnd64:
562 v.Op = OpAMD64ANDQ
563 return true
564 case OpAnd8:
565 v.Op = OpAMD64ANDL
566 return true
567 case OpAndB:
568 v.Op = OpAMD64ANDL
569 return true
570 case OpAtomicAdd32:
571 return rewriteValueAMD64_OpAtomicAdd32(v)
572 case OpAtomicAdd64:
573 return rewriteValueAMD64_OpAtomicAdd64(v)
574 case OpAtomicAnd32:
575 return rewriteValueAMD64_OpAtomicAnd32(v)
576 case OpAtomicAnd32value:
577 return rewriteValueAMD64_OpAtomicAnd32value(v)
578 case OpAtomicAnd64value:
579 return rewriteValueAMD64_OpAtomicAnd64value(v)
580 case OpAtomicAnd8:
581 return rewriteValueAMD64_OpAtomicAnd8(v)
582 case OpAtomicCompareAndSwap32:
583 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
584 case OpAtomicCompareAndSwap64:
585 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
586 case OpAtomicExchange32:
587 return rewriteValueAMD64_OpAtomicExchange32(v)
588 case OpAtomicExchange64:
589 return rewriteValueAMD64_OpAtomicExchange64(v)
590 case OpAtomicExchange8:
591 return rewriteValueAMD64_OpAtomicExchange8(v)
592 case OpAtomicLoad32:
593 return rewriteValueAMD64_OpAtomicLoad32(v)
594 case OpAtomicLoad64:
595 return rewriteValueAMD64_OpAtomicLoad64(v)
596 case OpAtomicLoad8:
597 return rewriteValueAMD64_OpAtomicLoad8(v)
598 case OpAtomicLoadPtr:
599 return rewriteValueAMD64_OpAtomicLoadPtr(v)
600 case OpAtomicOr32:
601 return rewriteValueAMD64_OpAtomicOr32(v)
602 case OpAtomicOr32value:
603 return rewriteValueAMD64_OpAtomicOr32value(v)
604 case OpAtomicOr64value:
605 return rewriteValueAMD64_OpAtomicOr64value(v)
606 case OpAtomicOr8:
607 return rewriteValueAMD64_OpAtomicOr8(v)
608 case OpAtomicStore32:
609 return rewriteValueAMD64_OpAtomicStore32(v)
610 case OpAtomicStore64:
611 return rewriteValueAMD64_OpAtomicStore64(v)
612 case OpAtomicStore8:
613 return rewriteValueAMD64_OpAtomicStore8(v)
614 case OpAtomicStorePtrNoWB:
615 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
616 case OpAvg64u:
617 v.Op = OpAMD64AVGQU
618 return true
619 case OpBitLen16:
620 return rewriteValueAMD64_OpBitLen16(v)
621 case OpBitLen32:
622 return rewriteValueAMD64_OpBitLen32(v)
623 case OpBitLen64:
624 return rewriteValueAMD64_OpBitLen64(v)
625 case OpBitLen8:
626 return rewriteValueAMD64_OpBitLen8(v)
627 case OpBswap16:
628 return rewriteValueAMD64_OpBswap16(v)
629 case OpBswap32:
630 v.Op = OpAMD64BSWAPL
631 return true
632 case OpBswap64:
633 v.Op = OpAMD64BSWAPQ
634 return true
635 case OpCeil:
636 return rewriteValueAMD64_OpCeil(v)
637 case OpClosureCall:
638 v.Op = OpAMD64CALLclosure
639 return true
640 case OpCom16:
641 v.Op = OpAMD64NOTL
642 return true
643 case OpCom32:
644 v.Op = OpAMD64NOTL
645 return true
646 case OpCom64:
647 v.Op = OpAMD64NOTQ
648 return true
649 case OpCom8:
650 v.Op = OpAMD64NOTL
651 return true
652 case OpCondSelect:
653 return rewriteValueAMD64_OpCondSelect(v)
654 case OpConst16:
655 return rewriteValueAMD64_OpConst16(v)
656 case OpConst32:
657 v.Op = OpAMD64MOVLconst
658 return true
659 case OpConst32F:
660 v.Op = OpAMD64MOVSSconst
661 return true
662 case OpConst64:
663 v.Op = OpAMD64MOVQconst
664 return true
665 case OpConst64F:
666 v.Op = OpAMD64MOVSDconst
667 return true
668 case OpConst8:
669 return rewriteValueAMD64_OpConst8(v)
670 case OpConstBool:
671 return rewriteValueAMD64_OpConstBool(v)
672 case OpConstNil:
673 return rewriteValueAMD64_OpConstNil(v)
674 case OpCtz16:
675 return rewriteValueAMD64_OpCtz16(v)
676 case OpCtz16NonZero:
677 return rewriteValueAMD64_OpCtz16NonZero(v)
678 case OpCtz32:
679 return rewriteValueAMD64_OpCtz32(v)
680 case OpCtz32NonZero:
681 return rewriteValueAMD64_OpCtz32NonZero(v)
682 case OpCtz64:
683 return rewriteValueAMD64_OpCtz64(v)
684 case OpCtz64NonZero:
685 return rewriteValueAMD64_OpCtz64NonZero(v)
686 case OpCtz8:
687 return rewriteValueAMD64_OpCtz8(v)
688 case OpCtz8NonZero:
689 return rewriteValueAMD64_OpCtz8NonZero(v)
690 case OpCvt32Fto32:
691 v.Op = OpAMD64CVTTSS2SL
692 return true
693 case OpCvt32Fto64:
694 v.Op = OpAMD64CVTTSS2SQ
695 return true
696 case OpCvt32Fto64F:
697 v.Op = OpAMD64CVTSS2SD
698 return true
699 case OpCvt32to32F:
700 v.Op = OpAMD64CVTSL2SS
701 return true
702 case OpCvt32to64F:
703 v.Op = OpAMD64CVTSL2SD
704 return true
705 case OpCvt64Fto32:
706 v.Op = OpAMD64CVTTSD2SL
707 return true
708 case OpCvt64Fto32F:
709 v.Op = OpAMD64CVTSD2SS
710 return true
711 case OpCvt64Fto64:
712 v.Op = OpAMD64CVTTSD2SQ
713 return true
714 case OpCvt64to32F:
715 v.Op = OpAMD64CVTSQ2SS
716 return true
717 case OpCvt64to64F:
718 v.Op = OpAMD64CVTSQ2SD
719 return true
720 case OpCvtBoolToUint8:
721 v.Op = OpCopy
722 return true
723 case OpDiv128u:
724 v.Op = OpAMD64DIVQU2
725 return true
726 case OpDiv16:
727 return rewriteValueAMD64_OpDiv16(v)
728 case OpDiv16u:
729 return rewriteValueAMD64_OpDiv16u(v)
730 case OpDiv32:
731 return rewriteValueAMD64_OpDiv32(v)
732 case OpDiv32F:
733 v.Op = OpAMD64DIVSS
734 return true
735 case OpDiv32u:
736 return rewriteValueAMD64_OpDiv32u(v)
737 case OpDiv64:
738 return rewriteValueAMD64_OpDiv64(v)
739 case OpDiv64F:
740 v.Op = OpAMD64DIVSD
741 return true
742 case OpDiv64u:
743 return rewriteValueAMD64_OpDiv64u(v)
744 case OpDiv8:
745 return rewriteValueAMD64_OpDiv8(v)
746 case OpDiv8u:
747 return rewriteValueAMD64_OpDiv8u(v)
748 case OpEq16:
749 return rewriteValueAMD64_OpEq16(v)
750 case OpEq32:
751 return rewriteValueAMD64_OpEq32(v)
752 case OpEq32F:
753 return rewriteValueAMD64_OpEq32F(v)
754 case OpEq64:
755 return rewriteValueAMD64_OpEq64(v)
756 case OpEq64F:
757 return rewriteValueAMD64_OpEq64F(v)
758 case OpEq8:
759 return rewriteValueAMD64_OpEq8(v)
760 case OpEqB:
761 return rewriteValueAMD64_OpEqB(v)
762 case OpEqPtr:
763 return rewriteValueAMD64_OpEqPtr(v)
764 case OpFMA:
765 return rewriteValueAMD64_OpFMA(v)
766 case OpFloor:
767 return rewriteValueAMD64_OpFloor(v)
768 case OpGetCallerPC:
769 v.Op = OpAMD64LoweredGetCallerPC
770 return true
771 case OpGetCallerSP:
772 v.Op = OpAMD64LoweredGetCallerSP
773 return true
774 case OpGetClosurePtr:
775 v.Op = OpAMD64LoweredGetClosurePtr
776 return true
777 case OpGetG:
778 return rewriteValueAMD64_OpGetG(v)
779 case OpHasCPUFeature:
780 return rewriteValueAMD64_OpHasCPUFeature(v)
781 case OpHmul32:
782 v.Op = OpAMD64HMULL
783 return true
784 case OpHmul32u:
785 v.Op = OpAMD64HMULLU
786 return true
787 case OpHmul64:
788 v.Op = OpAMD64HMULQ
789 return true
790 case OpHmul64u:
791 v.Op = OpAMD64HMULQU
792 return true
793 case OpInterCall:
794 v.Op = OpAMD64CALLinter
795 return true
796 case OpIsInBounds:
797 return rewriteValueAMD64_OpIsInBounds(v)
798 case OpIsNonNil:
799 return rewriteValueAMD64_OpIsNonNil(v)
800 case OpIsSliceInBounds:
801 return rewriteValueAMD64_OpIsSliceInBounds(v)
802 case OpLeq16:
803 return rewriteValueAMD64_OpLeq16(v)
804 case OpLeq16U:
805 return rewriteValueAMD64_OpLeq16U(v)
806 case OpLeq32:
807 return rewriteValueAMD64_OpLeq32(v)
808 case OpLeq32F:
809 return rewriteValueAMD64_OpLeq32F(v)
810 case OpLeq32U:
811 return rewriteValueAMD64_OpLeq32U(v)
812 case OpLeq64:
813 return rewriteValueAMD64_OpLeq64(v)
814 case OpLeq64F:
815 return rewriteValueAMD64_OpLeq64F(v)
816 case OpLeq64U:
817 return rewriteValueAMD64_OpLeq64U(v)
818 case OpLeq8:
819 return rewriteValueAMD64_OpLeq8(v)
820 case OpLeq8U:
821 return rewriteValueAMD64_OpLeq8U(v)
822 case OpLess16:
823 return rewriteValueAMD64_OpLess16(v)
824 case OpLess16U:
825 return rewriteValueAMD64_OpLess16U(v)
826 case OpLess32:
827 return rewriteValueAMD64_OpLess32(v)
828 case OpLess32F:
829 return rewriteValueAMD64_OpLess32F(v)
830 case OpLess32U:
831 return rewriteValueAMD64_OpLess32U(v)
832 case OpLess64:
833 return rewriteValueAMD64_OpLess64(v)
834 case OpLess64F:
835 return rewriteValueAMD64_OpLess64F(v)
836 case OpLess64U:
837 return rewriteValueAMD64_OpLess64U(v)
838 case OpLess8:
839 return rewriteValueAMD64_OpLess8(v)
840 case OpLess8U:
841 return rewriteValueAMD64_OpLess8U(v)
842 case OpLoad:
843 return rewriteValueAMD64_OpLoad(v)
844 case OpLocalAddr:
845 return rewriteValueAMD64_OpLocalAddr(v)
846 case OpLsh16x16:
847 return rewriteValueAMD64_OpLsh16x16(v)
848 case OpLsh16x32:
849 return rewriteValueAMD64_OpLsh16x32(v)
850 case OpLsh16x64:
851 return rewriteValueAMD64_OpLsh16x64(v)
852 case OpLsh16x8:
853 return rewriteValueAMD64_OpLsh16x8(v)
854 case OpLsh32x16:
855 return rewriteValueAMD64_OpLsh32x16(v)
856 case OpLsh32x32:
857 return rewriteValueAMD64_OpLsh32x32(v)
858 case OpLsh32x64:
859 return rewriteValueAMD64_OpLsh32x64(v)
860 case OpLsh32x8:
861 return rewriteValueAMD64_OpLsh32x8(v)
862 case OpLsh64x16:
863 return rewriteValueAMD64_OpLsh64x16(v)
864 case OpLsh64x32:
865 return rewriteValueAMD64_OpLsh64x32(v)
866 case OpLsh64x64:
867 return rewriteValueAMD64_OpLsh64x64(v)
868 case OpLsh64x8:
869 return rewriteValueAMD64_OpLsh64x8(v)
870 case OpLsh8x16:
871 return rewriteValueAMD64_OpLsh8x16(v)
872 case OpLsh8x32:
873 return rewriteValueAMD64_OpLsh8x32(v)
874 case OpLsh8x64:
875 return rewriteValueAMD64_OpLsh8x64(v)
876 case OpLsh8x8:
877 return rewriteValueAMD64_OpLsh8x8(v)
878 case OpMax32F:
879 return rewriteValueAMD64_OpMax32F(v)
880 case OpMax64F:
881 return rewriteValueAMD64_OpMax64F(v)
882 case OpMin32F:
883 return rewriteValueAMD64_OpMin32F(v)
884 case OpMin64F:
885 return rewriteValueAMD64_OpMin64F(v)
886 case OpMod16:
887 return rewriteValueAMD64_OpMod16(v)
888 case OpMod16u:
889 return rewriteValueAMD64_OpMod16u(v)
890 case OpMod32:
891 return rewriteValueAMD64_OpMod32(v)
892 case OpMod32u:
893 return rewriteValueAMD64_OpMod32u(v)
894 case OpMod64:
895 return rewriteValueAMD64_OpMod64(v)
896 case OpMod64u:
897 return rewriteValueAMD64_OpMod64u(v)
898 case OpMod8:
899 return rewriteValueAMD64_OpMod8(v)
900 case OpMod8u:
901 return rewriteValueAMD64_OpMod8u(v)
902 case OpMove:
903 return rewriteValueAMD64_OpMove(v)
904 case OpMul16:
905 v.Op = OpAMD64MULL
906 return true
907 case OpMul32:
908 v.Op = OpAMD64MULL
909 return true
910 case OpMul32F:
911 v.Op = OpAMD64MULSS
912 return true
913 case OpMul64:
914 v.Op = OpAMD64MULQ
915 return true
916 case OpMul64F:
917 v.Op = OpAMD64MULSD
918 return true
919 case OpMul64uhilo:
920 v.Op = OpAMD64MULQU2
921 return true
922 case OpMul8:
923 v.Op = OpAMD64MULL
924 return true
925 case OpNeg16:
926 v.Op = OpAMD64NEGL
927 return true
928 case OpNeg32:
929 v.Op = OpAMD64NEGL
930 return true
931 case OpNeg32F:
932 return rewriteValueAMD64_OpNeg32F(v)
933 case OpNeg64:
934 v.Op = OpAMD64NEGQ
935 return true
936 case OpNeg64F:
937 return rewriteValueAMD64_OpNeg64F(v)
938 case OpNeg8:
939 v.Op = OpAMD64NEGL
940 return true
941 case OpNeq16:
942 return rewriteValueAMD64_OpNeq16(v)
943 case OpNeq32:
944 return rewriteValueAMD64_OpNeq32(v)
945 case OpNeq32F:
946 return rewriteValueAMD64_OpNeq32F(v)
947 case OpNeq64:
948 return rewriteValueAMD64_OpNeq64(v)
949 case OpNeq64F:
950 return rewriteValueAMD64_OpNeq64F(v)
951 case OpNeq8:
952 return rewriteValueAMD64_OpNeq8(v)
953 case OpNeqB:
954 return rewriteValueAMD64_OpNeqB(v)
955 case OpNeqPtr:
956 return rewriteValueAMD64_OpNeqPtr(v)
957 case OpNilCheck:
958 v.Op = OpAMD64LoweredNilCheck
959 return true
960 case OpNot:
961 return rewriteValueAMD64_OpNot(v)
962 case OpOffPtr:
963 return rewriteValueAMD64_OpOffPtr(v)
964 case OpOr16:
965 v.Op = OpAMD64ORL
966 return true
967 case OpOr32:
968 v.Op = OpAMD64ORL
969 return true
970 case OpOr64:
971 v.Op = OpAMD64ORQ
972 return true
973 case OpOr8:
974 v.Op = OpAMD64ORL
975 return true
976 case OpOrB:
977 v.Op = OpAMD64ORL
978 return true
979 case OpPanicBounds:
980 return rewriteValueAMD64_OpPanicBounds(v)
981 case OpPopCount16:
982 return rewriteValueAMD64_OpPopCount16(v)
983 case OpPopCount32:
984 v.Op = OpAMD64POPCNTL
985 return true
986 case OpPopCount64:
987 v.Op = OpAMD64POPCNTQ
988 return true
989 case OpPopCount8:
990 return rewriteValueAMD64_OpPopCount8(v)
991 case OpPrefetchCache:
992 v.Op = OpAMD64PrefetchT0
993 return true
994 case OpPrefetchCacheStreamed:
995 v.Op = OpAMD64PrefetchNTA
996 return true
997 case OpRotateLeft16:
998 v.Op = OpAMD64ROLW
999 return true
1000 case OpRotateLeft32:
1001 v.Op = OpAMD64ROLL
1002 return true
1003 case OpRotateLeft64:
1004 v.Op = OpAMD64ROLQ
1005 return true
1006 case OpRotateLeft8:
1007 v.Op = OpAMD64ROLB
1008 return true
1009 case OpRound32F:
1010 v.Op = OpCopy
1011 return true
1012 case OpRound64F:
1013 v.Op = OpCopy
1014 return true
1015 case OpRoundToEven:
1016 return rewriteValueAMD64_OpRoundToEven(v)
1017 case OpRsh16Ux16:
1018 return rewriteValueAMD64_OpRsh16Ux16(v)
1019 case OpRsh16Ux32:
1020 return rewriteValueAMD64_OpRsh16Ux32(v)
1021 case OpRsh16Ux64:
1022 return rewriteValueAMD64_OpRsh16Ux64(v)
1023 case OpRsh16Ux8:
1024 return rewriteValueAMD64_OpRsh16Ux8(v)
1025 case OpRsh16x16:
1026 return rewriteValueAMD64_OpRsh16x16(v)
1027 case OpRsh16x32:
1028 return rewriteValueAMD64_OpRsh16x32(v)
1029 case OpRsh16x64:
1030 return rewriteValueAMD64_OpRsh16x64(v)
1031 case OpRsh16x8:
1032 return rewriteValueAMD64_OpRsh16x8(v)
1033 case OpRsh32Ux16:
1034 return rewriteValueAMD64_OpRsh32Ux16(v)
1035 case OpRsh32Ux32:
1036 return rewriteValueAMD64_OpRsh32Ux32(v)
1037 case OpRsh32Ux64:
1038 return rewriteValueAMD64_OpRsh32Ux64(v)
1039 case OpRsh32Ux8:
1040 return rewriteValueAMD64_OpRsh32Ux8(v)
1041 case OpRsh32x16:
1042 return rewriteValueAMD64_OpRsh32x16(v)
1043 case OpRsh32x32:
1044 return rewriteValueAMD64_OpRsh32x32(v)
1045 case OpRsh32x64:
1046 return rewriteValueAMD64_OpRsh32x64(v)
1047 case OpRsh32x8:
1048 return rewriteValueAMD64_OpRsh32x8(v)
1049 case OpRsh64Ux16:
1050 return rewriteValueAMD64_OpRsh64Ux16(v)
1051 case OpRsh64Ux32:
1052 return rewriteValueAMD64_OpRsh64Ux32(v)
1053 case OpRsh64Ux64:
1054 return rewriteValueAMD64_OpRsh64Ux64(v)
1055 case OpRsh64Ux8:
1056 return rewriteValueAMD64_OpRsh64Ux8(v)
1057 case OpRsh64x16:
1058 return rewriteValueAMD64_OpRsh64x16(v)
1059 case OpRsh64x32:
1060 return rewriteValueAMD64_OpRsh64x32(v)
1061 case OpRsh64x64:
1062 return rewriteValueAMD64_OpRsh64x64(v)
1063 case OpRsh64x8:
1064 return rewriteValueAMD64_OpRsh64x8(v)
1065 case OpRsh8Ux16:
1066 return rewriteValueAMD64_OpRsh8Ux16(v)
1067 case OpRsh8Ux32:
1068 return rewriteValueAMD64_OpRsh8Ux32(v)
1069 case OpRsh8Ux64:
1070 return rewriteValueAMD64_OpRsh8Ux64(v)
1071 case OpRsh8Ux8:
1072 return rewriteValueAMD64_OpRsh8Ux8(v)
1073 case OpRsh8x16:
1074 return rewriteValueAMD64_OpRsh8x16(v)
1075 case OpRsh8x32:
1076 return rewriteValueAMD64_OpRsh8x32(v)
1077 case OpRsh8x64:
1078 return rewriteValueAMD64_OpRsh8x64(v)
1079 case OpRsh8x8:
1080 return rewriteValueAMD64_OpRsh8x8(v)
1081 case OpSelect0:
1082 return rewriteValueAMD64_OpSelect0(v)
1083 case OpSelect1:
1084 return rewriteValueAMD64_OpSelect1(v)
1085 case OpSelectN:
1086 return rewriteValueAMD64_OpSelectN(v)
1087 case OpSignExt16to32:
1088 v.Op = OpAMD64MOVWQSX
1089 return true
1090 case OpSignExt16to64:
1091 v.Op = OpAMD64MOVWQSX
1092 return true
1093 case OpSignExt32to64:
1094 v.Op = OpAMD64MOVLQSX
1095 return true
1096 case OpSignExt8to16:
1097 v.Op = OpAMD64MOVBQSX
1098 return true
1099 case OpSignExt8to32:
1100 v.Op = OpAMD64MOVBQSX
1101 return true
1102 case OpSignExt8to64:
1103 v.Op = OpAMD64MOVBQSX
1104 return true
1105 case OpSlicemask:
1106 return rewriteValueAMD64_OpSlicemask(v)
1107 case OpSpectreIndex:
1108 return rewriteValueAMD64_OpSpectreIndex(v)
1109 case OpSpectreSliceIndex:
1110 return rewriteValueAMD64_OpSpectreSliceIndex(v)
1111 case OpSqrt:
1112 v.Op = OpAMD64SQRTSD
1113 return true
1114 case OpSqrt32:
1115 v.Op = OpAMD64SQRTSS
1116 return true
1117 case OpStaticCall:
1118 v.Op = OpAMD64CALLstatic
1119 return true
1120 case OpStore:
1121 return rewriteValueAMD64_OpStore(v)
1122 case OpSub16:
1123 v.Op = OpAMD64SUBL
1124 return true
1125 case OpSub32:
1126 v.Op = OpAMD64SUBL
1127 return true
1128 case OpSub32F:
1129 v.Op = OpAMD64SUBSS
1130 return true
1131 case OpSub64:
1132 v.Op = OpAMD64SUBQ
1133 return true
1134 case OpSub64F:
1135 v.Op = OpAMD64SUBSD
1136 return true
1137 case OpSub8:
1138 v.Op = OpAMD64SUBL
1139 return true
1140 case OpSubPtr:
1141 v.Op = OpAMD64SUBQ
1142 return true
1143 case OpTailCall:
1144 v.Op = OpAMD64CALLtail
1145 return true
1146 case OpTrunc:
1147 return rewriteValueAMD64_OpTrunc(v)
1148 case OpTrunc16to8:
1149 v.Op = OpCopy
1150 return true
1151 case OpTrunc32to16:
1152 v.Op = OpCopy
1153 return true
1154 case OpTrunc32to8:
1155 v.Op = OpCopy
1156 return true
1157 case OpTrunc64to16:
1158 v.Op = OpCopy
1159 return true
1160 case OpTrunc64to32:
1161 v.Op = OpCopy
1162 return true
1163 case OpTrunc64to8:
1164 v.Op = OpCopy
1165 return true
1166 case OpWB:
1167 v.Op = OpAMD64LoweredWB
1168 return true
1169 case OpXor16:
1170 v.Op = OpAMD64XORL
1171 return true
1172 case OpXor32:
1173 v.Op = OpAMD64XORL
1174 return true
1175 case OpXor64:
1176 v.Op = OpAMD64XORQ
1177 return true
1178 case OpXor8:
1179 v.Op = OpAMD64XORL
1180 return true
1181 case OpZero:
1182 return rewriteValueAMD64_OpZero(v)
1183 case OpZeroExt16to32:
1184 v.Op = OpAMD64MOVWQZX
1185 return true
1186 case OpZeroExt16to64:
1187 v.Op = OpAMD64MOVWQZX
1188 return true
1189 case OpZeroExt32to64:
1190 v.Op = OpAMD64MOVLQZX
1191 return true
1192 case OpZeroExt8to16:
1193 v.Op = OpAMD64MOVBQZX
1194 return true
1195 case OpZeroExt8to32:
1196 v.Op = OpAMD64MOVBQZX
1197 return true
1198 case OpZeroExt8to64:
1199 v.Op = OpAMD64MOVBQZX
1200 return true
1201 }
1202 return false
1203 }
1204 func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
1205 v_2 := v.Args[2]
1206 v_1 := v.Args[1]
1207 v_0 := v.Args[0]
1208
1209
1210
1211 for {
1212 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1213 x := v_0
1214 if v_1.Op != OpAMD64MOVQconst {
1215 continue
1216 }
1217 c := auxIntToInt64(v_1.AuxInt)
1218 carry := v_2
1219 if !(is32Bit(c)) {
1220 continue
1221 }
1222 v.reset(OpAMD64ADCQconst)
1223 v.AuxInt = int32ToAuxInt(int32(c))
1224 v.AddArg2(x, carry)
1225 return true
1226 }
1227 break
1228 }
1229
1230
1231 for {
1232 x := v_0
1233 y := v_1
1234 if v_2.Op != OpAMD64FlagEQ {
1235 break
1236 }
1237 v.reset(OpAMD64ADDQcarry)
1238 v.AddArg2(x, y)
1239 return true
1240 }
1241 return false
1242 }
1243 func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
1244 v_1 := v.Args[1]
1245 v_0 := v.Args[0]
1246
1247
1248 for {
1249 c := auxIntToInt32(v.AuxInt)
1250 x := v_0
1251 if v_1.Op != OpAMD64FlagEQ {
1252 break
1253 }
1254 v.reset(OpAMD64ADDQconstcarry)
1255 v.AuxInt = int32ToAuxInt(c)
1256 v.AddArg(x)
1257 return true
1258 }
1259 return false
1260 }
1261 func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
1262 v_1 := v.Args[1]
1263 v_0 := v.Args[0]
1264
1265
1266 for {
1267 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1268 x := v_0
1269 if v_1.Op != OpAMD64MOVLconst {
1270 continue
1271 }
1272 c := auxIntToInt32(v_1.AuxInt)
1273 v.reset(OpAMD64ADDLconst)
1274 v.AuxInt = int32ToAuxInt(c)
1275 v.AddArg(x)
1276 return true
1277 }
1278 break
1279 }
1280
1281
1282 for {
1283 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1284 x := v_0
1285 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
1286 continue
1287 }
1288 y := v_1.Args[0]
1289 v.reset(OpAMD64LEAL8)
1290 v.AddArg2(x, y)
1291 return true
1292 }
1293 break
1294 }
1295
1296
1297 for {
1298 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1299 x := v_0
1300 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
1301 continue
1302 }
1303 y := v_1.Args[0]
1304 v.reset(OpAMD64LEAL4)
1305 v.AddArg2(x, y)
1306 return true
1307 }
1308 break
1309 }
1310
1311
1312 for {
1313 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1314 x := v_0
1315 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
1316 continue
1317 }
1318 y := v_1.Args[0]
1319 v.reset(OpAMD64LEAL2)
1320 v.AddArg2(x, y)
1321 return true
1322 }
1323 break
1324 }
1325
1326
1327 for {
1328 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1329 x := v_0
1330 if v_1.Op != OpAMD64ADDL {
1331 continue
1332 }
1333 y := v_1.Args[1]
1334 if y != v_1.Args[0] {
1335 continue
1336 }
1337 v.reset(OpAMD64LEAL2)
1338 v.AddArg2(x, y)
1339 return true
1340 }
1341 break
1342 }
1343
1344
1345 for {
1346 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1347 x := v_0
1348 if v_1.Op != OpAMD64ADDL {
1349 continue
1350 }
1351 _ = v_1.Args[1]
1352 v_1_0 := v_1.Args[0]
1353 v_1_1 := v_1.Args[1]
1354 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1355 if x != v_1_0 {
1356 continue
1357 }
1358 y := v_1_1
1359 v.reset(OpAMD64LEAL2)
1360 v.AddArg2(y, x)
1361 return true
1362 }
1363 }
1364 break
1365 }
1366
1367
1368 for {
1369 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1370 if v_0.Op != OpAMD64ADDLconst {
1371 continue
1372 }
1373 c := auxIntToInt32(v_0.AuxInt)
1374 x := v_0.Args[0]
1375 y := v_1
1376 v.reset(OpAMD64LEAL1)
1377 v.AuxInt = int32ToAuxInt(c)
1378 v.AddArg2(x, y)
1379 return true
1380 }
1381 break
1382 }
1383
1384
1385
1386 for {
1387 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1388 x := v_0
1389 if v_1.Op != OpAMD64LEAL {
1390 continue
1391 }
1392 c := auxIntToInt32(v_1.AuxInt)
1393 s := auxToSym(v_1.Aux)
1394 y := v_1.Args[0]
1395 if !(x.Op != OpSB && y.Op != OpSB) {
1396 continue
1397 }
1398 v.reset(OpAMD64LEAL1)
1399 v.AuxInt = int32ToAuxInt(c)
1400 v.Aux = symToAux(s)
1401 v.AddArg2(x, y)
1402 return true
1403 }
1404 break
1405 }
1406
1407
1408 for {
1409 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1410 x := v_0
1411 if v_1.Op != OpAMD64NEGL {
1412 continue
1413 }
1414 y := v_1.Args[0]
1415 v.reset(OpAMD64SUBL)
1416 v.AddArg2(x, y)
1417 return true
1418 }
1419 break
1420 }
1421
1422
1423
1424 for {
1425 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1426 x := v_0
1427 l := v_1
1428 if l.Op != OpAMD64MOVLload {
1429 continue
1430 }
1431 off := auxIntToInt32(l.AuxInt)
1432 sym := auxToSym(l.Aux)
1433 mem := l.Args[1]
1434 ptr := l.Args[0]
1435 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
1436 continue
1437 }
1438 v.reset(OpAMD64ADDLload)
1439 v.AuxInt = int32ToAuxInt(off)
1440 v.Aux = symToAux(sym)
1441 v.AddArg3(x, ptr, mem)
1442 return true
1443 }
1444 break
1445 }
1446 return false
1447 }
1448 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
1449 v_0 := v.Args[0]
1450
1451
1452 for {
1453 c := auxIntToInt32(v.AuxInt)
1454 if v_0.Op != OpAMD64ADDL {
1455 break
1456 }
1457 y := v_0.Args[1]
1458 x := v_0.Args[0]
1459 v.reset(OpAMD64LEAL1)
1460 v.AuxInt = int32ToAuxInt(c)
1461 v.AddArg2(x, y)
1462 return true
1463 }
1464
1465
1466 for {
1467 c := auxIntToInt32(v.AuxInt)
1468 if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
1469 break
1470 }
1471 x := v_0.Args[0]
1472 v.reset(OpAMD64LEAL1)
1473 v.AuxInt = int32ToAuxInt(c)
1474 v.AddArg2(x, x)
1475 return true
1476 }
1477
1478
1479
1480 for {
1481 c := auxIntToInt32(v.AuxInt)
1482 if v_0.Op != OpAMD64LEAL {
1483 break
1484 }
1485 d := auxIntToInt32(v_0.AuxInt)
1486 s := auxToSym(v_0.Aux)
1487 x := v_0.Args[0]
1488 if !(is32Bit(int64(c) + int64(d))) {
1489 break
1490 }
1491 v.reset(OpAMD64LEAL)
1492 v.AuxInt = int32ToAuxInt(c + d)
1493 v.Aux = symToAux(s)
1494 v.AddArg(x)
1495 return true
1496 }
1497
1498
1499
1500 for {
1501 c := auxIntToInt32(v.AuxInt)
1502 if v_0.Op != OpAMD64LEAL1 {
1503 break
1504 }
1505 d := auxIntToInt32(v_0.AuxInt)
1506 s := auxToSym(v_0.Aux)
1507 y := v_0.Args[1]
1508 x := v_0.Args[0]
1509 if !(is32Bit(int64(c) + int64(d))) {
1510 break
1511 }
1512 v.reset(OpAMD64LEAL1)
1513 v.AuxInt = int32ToAuxInt(c + d)
1514 v.Aux = symToAux(s)
1515 v.AddArg2(x, y)
1516 return true
1517 }
1518
1519
1520
1521 for {
1522 c := auxIntToInt32(v.AuxInt)
1523 if v_0.Op != OpAMD64LEAL2 {
1524 break
1525 }
1526 d := auxIntToInt32(v_0.AuxInt)
1527 s := auxToSym(v_0.Aux)
1528 y := v_0.Args[1]
1529 x := v_0.Args[0]
1530 if !(is32Bit(int64(c) + int64(d))) {
1531 break
1532 }
1533 v.reset(OpAMD64LEAL2)
1534 v.AuxInt = int32ToAuxInt(c + d)
1535 v.Aux = symToAux(s)
1536 v.AddArg2(x, y)
1537 return true
1538 }
1539
1540
1541
1542 for {
1543 c := auxIntToInt32(v.AuxInt)
1544 if v_0.Op != OpAMD64LEAL4 {
1545 break
1546 }
1547 d := auxIntToInt32(v_0.AuxInt)
1548 s := auxToSym(v_0.Aux)
1549 y := v_0.Args[1]
1550 x := v_0.Args[0]
1551 if !(is32Bit(int64(c) + int64(d))) {
1552 break
1553 }
1554 v.reset(OpAMD64LEAL4)
1555 v.AuxInt = int32ToAuxInt(c + d)
1556 v.Aux = symToAux(s)
1557 v.AddArg2(x, y)
1558 return true
1559 }
1560
1561
1562
1563 for {
1564 c := auxIntToInt32(v.AuxInt)
1565 if v_0.Op != OpAMD64LEAL8 {
1566 break
1567 }
1568 d := auxIntToInt32(v_0.AuxInt)
1569 s := auxToSym(v_0.Aux)
1570 y := v_0.Args[1]
1571 x := v_0.Args[0]
1572 if !(is32Bit(int64(c) + int64(d))) {
1573 break
1574 }
1575 v.reset(OpAMD64LEAL8)
1576 v.AuxInt = int32ToAuxInt(c + d)
1577 v.Aux = symToAux(s)
1578 v.AddArg2(x, y)
1579 return true
1580 }
1581
1582
1583
1584 for {
1585 c := auxIntToInt32(v.AuxInt)
1586 x := v_0
1587 if !(c == 0) {
1588 break
1589 }
1590 v.copyOf(x)
1591 return true
1592 }
1593
1594
1595 for {
1596 c := auxIntToInt32(v.AuxInt)
1597 if v_0.Op != OpAMD64MOVLconst {
1598 break
1599 }
1600 d := auxIntToInt32(v_0.AuxInt)
1601 v.reset(OpAMD64MOVLconst)
1602 v.AuxInt = int32ToAuxInt(c + d)
1603 return true
1604 }
1605
1606
1607 for {
1608 c := auxIntToInt32(v.AuxInt)
1609 if v_0.Op != OpAMD64ADDLconst {
1610 break
1611 }
1612 d := auxIntToInt32(v_0.AuxInt)
1613 x := v_0.Args[0]
1614 v.reset(OpAMD64ADDLconst)
1615 v.AuxInt = int32ToAuxInt(c + d)
1616 v.AddArg(x)
1617 return true
1618 }
1619
1620
1621 for {
1622 off := auxIntToInt32(v.AuxInt)
1623 x := v_0
1624 if x.Op != OpSP {
1625 break
1626 }
1627 v.reset(OpAMD64LEAL)
1628 v.AuxInt = int32ToAuxInt(off)
1629 v.AddArg(x)
1630 return true
1631 }
1632 return false
1633 }
1634 func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
1635 v_1 := v.Args[1]
1636 v_0 := v.Args[0]
1637
1638
1639
1640 for {
1641 valoff1 := auxIntToValAndOff(v.AuxInt)
1642 sym := auxToSym(v.Aux)
1643 if v_0.Op != OpAMD64ADDQconst {
1644 break
1645 }
1646 off2 := auxIntToInt32(v_0.AuxInt)
1647 base := v_0.Args[0]
1648 mem := v_1
1649 if !(ValAndOff(valoff1).canAdd32(off2)) {
1650 break
1651 }
1652 v.reset(OpAMD64ADDLconstmodify)
1653 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1654 v.Aux = symToAux(sym)
1655 v.AddArg2(base, mem)
1656 return true
1657 }
1658
1659
1660
1661 for {
1662 valoff1 := auxIntToValAndOff(v.AuxInt)
1663 sym1 := auxToSym(v.Aux)
1664 if v_0.Op != OpAMD64LEAQ {
1665 break
1666 }
1667 off2 := auxIntToInt32(v_0.AuxInt)
1668 sym2 := auxToSym(v_0.Aux)
1669 base := v_0.Args[0]
1670 mem := v_1
1671 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
1672 break
1673 }
1674 v.reset(OpAMD64ADDLconstmodify)
1675 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1676 v.Aux = symToAux(mergeSym(sym1, sym2))
1677 v.AddArg2(base, mem)
1678 return true
1679 }
1680 return false
1681 }
1682 func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
1683 v_2 := v.Args[2]
1684 v_1 := v.Args[1]
1685 v_0 := v.Args[0]
1686 b := v.Block
1687 typ := &b.Func.Config.Types
1688
1689
1690
1691 for {
1692 off1 := auxIntToInt32(v.AuxInt)
1693 sym := auxToSym(v.Aux)
1694 val := v_0
1695 if v_1.Op != OpAMD64ADDQconst {
1696 break
1697 }
1698 off2 := auxIntToInt32(v_1.AuxInt)
1699 base := v_1.Args[0]
1700 mem := v_2
1701 if !(is32Bit(int64(off1) + int64(off2))) {
1702 break
1703 }
1704 v.reset(OpAMD64ADDLload)
1705 v.AuxInt = int32ToAuxInt(off1 + off2)
1706 v.Aux = symToAux(sym)
1707 v.AddArg3(val, base, mem)
1708 return true
1709 }
1710
1711
1712
1713 for {
1714 off1 := auxIntToInt32(v.AuxInt)
1715 sym1 := auxToSym(v.Aux)
1716 val := v_0
1717 if v_1.Op != OpAMD64LEAQ {
1718 break
1719 }
1720 off2 := auxIntToInt32(v_1.AuxInt)
1721 sym2 := auxToSym(v_1.Aux)
1722 base := v_1.Args[0]
1723 mem := v_2
1724 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1725 break
1726 }
1727 v.reset(OpAMD64ADDLload)
1728 v.AuxInt = int32ToAuxInt(off1 + off2)
1729 v.Aux = symToAux(mergeSym(sym1, sym2))
1730 v.AddArg3(val, base, mem)
1731 return true
1732 }
1733
1734
1735 for {
1736 off := auxIntToInt32(v.AuxInt)
1737 sym := auxToSym(v.Aux)
1738 x := v_0
1739 ptr := v_1
1740 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
1741 break
1742 }
1743 y := v_2.Args[1]
1744 if ptr != v_2.Args[0] {
1745 break
1746 }
1747 v.reset(OpAMD64ADDL)
1748 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
1749 v0.AddArg(y)
1750 v.AddArg2(x, v0)
1751 return true
1752 }
1753 return false
1754 }
1755 func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
1756 v_2 := v.Args[2]
1757 v_1 := v.Args[1]
1758 v_0 := v.Args[0]
1759
1760
1761
1762 for {
1763 off1 := auxIntToInt32(v.AuxInt)
1764 sym := auxToSym(v.Aux)
1765 if v_0.Op != OpAMD64ADDQconst {
1766 break
1767 }
1768 off2 := auxIntToInt32(v_0.AuxInt)
1769 base := v_0.Args[0]
1770 val := v_1
1771 mem := v_2
1772 if !(is32Bit(int64(off1) + int64(off2))) {
1773 break
1774 }
1775 v.reset(OpAMD64ADDLmodify)
1776 v.AuxInt = int32ToAuxInt(off1 + off2)
1777 v.Aux = symToAux(sym)
1778 v.AddArg3(base, val, mem)
1779 return true
1780 }
1781
1782
1783
1784 for {
1785 off1 := auxIntToInt32(v.AuxInt)
1786 sym1 := auxToSym(v.Aux)
1787 if v_0.Op != OpAMD64LEAQ {
1788 break
1789 }
1790 off2 := auxIntToInt32(v_0.AuxInt)
1791 sym2 := auxToSym(v_0.Aux)
1792 base := v_0.Args[0]
1793 val := v_1
1794 mem := v_2
1795 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1796 break
1797 }
1798 v.reset(OpAMD64ADDLmodify)
1799 v.AuxInt = int32ToAuxInt(off1 + off2)
1800 v.Aux = symToAux(mergeSym(sym1, sym2))
1801 v.AddArg3(base, val, mem)
1802 return true
1803 }
1804 return false
1805 }
1806 func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
1807 v_1 := v.Args[1]
1808 v_0 := v.Args[0]
1809
1810
1811
1812 for {
1813 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1814 x := v_0
1815 if v_1.Op != OpAMD64MOVQconst {
1816 continue
1817 }
1818 t := v_1.Type
1819 c := auxIntToInt64(v_1.AuxInt)
1820 if !(is32Bit(c) && !t.IsPtr()) {
1821 continue
1822 }
1823 v.reset(OpAMD64ADDQconst)
1824 v.AuxInt = int32ToAuxInt(int32(c))
1825 v.AddArg(x)
1826 return true
1827 }
1828 break
1829 }
1830
1831
1832 for {
1833 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1834 x := v_0
1835 if v_1.Op != OpAMD64MOVLconst {
1836 continue
1837 }
1838 c := auxIntToInt32(v_1.AuxInt)
1839 v.reset(OpAMD64ADDQconst)
1840 v.AuxInt = int32ToAuxInt(c)
1841 v.AddArg(x)
1842 return true
1843 }
1844 break
1845 }
1846
1847
1848 for {
1849 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1850 x := v_0
1851 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
1852 continue
1853 }
1854 y := v_1.Args[0]
1855 v.reset(OpAMD64LEAQ8)
1856 v.AddArg2(x, y)
1857 return true
1858 }
1859 break
1860 }
1861
1862
1863 for {
1864 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1865 x := v_0
1866 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
1867 continue
1868 }
1869 y := v_1.Args[0]
1870 v.reset(OpAMD64LEAQ4)
1871 v.AddArg2(x, y)
1872 return true
1873 }
1874 break
1875 }
1876
1877
1878 for {
1879 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1880 x := v_0
1881 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
1882 continue
1883 }
1884 y := v_1.Args[0]
1885 v.reset(OpAMD64LEAQ2)
1886 v.AddArg2(x, y)
1887 return true
1888 }
1889 break
1890 }
1891
1892
1893 for {
1894 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1895 x := v_0
1896 if v_1.Op != OpAMD64ADDQ {
1897 continue
1898 }
1899 y := v_1.Args[1]
1900 if y != v_1.Args[0] {
1901 continue
1902 }
1903 v.reset(OpAMD64LEAQ2)
1904 v.AddArg2(x, y)
1905 return true
1906 }
1907 break
1908 }
1909
1910
1911 for {
1912 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1913 x := v_0
1914 if v_1.Op != OpAMD64ADDQ {
1915 continue
1916 }
1917 _ = v_1.Args[1]
1918 v_1_0 := v_1.Args[0]
1919 v_1_1 := v_1.Args[1]
1920 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1921 if x != v_1_0 {
1922 continue
1923 }
1924 y := v_1_1
1925 v.reset(OpAMD64LEAQ2)
1926 v.AddArg2(y, x)
1927 return true
1928 }
1929 }
1930 break
1931 }
1932
1933
1934 for {
1935 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1936 if v_0.Op != OpAMD64ADDQconst {
1937 continue
1938 }
1939 c := auxIntToInt32(v_0.AuxInt)
1940 x := v_0.Args[0]
1941 y := v_1
1942 v.reset(OpAMD64LEAQ1)
1943 v.AuxInt = int32ToAuxInt(c)
1944 v.AddArg2(x, y)
1945 return true
1946 }
1947 break
1948 }
1949
1950
1951
1952 for {
1953 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1954 x := v_0
1955 if v_1.Op != OpAMD64LEAQ {
1956 continue
1957 }
1958 c := auxIntToInt32(v_1.AuxInt)
1959 s := auxToSym(v_1.Aux)
1960 y := v_1.Args[0]
1961 if !(x.Op != OpSB && y.Op != OpSB) {
1962 continue
1963 }
1964 v.reset(OpAMD64LEAQ1)
1965 v.AuxInt = int32ToAuxInt(c)
1966 v.Aux = symToAux(s)
1967 v.AddArg2(x, y)
1968 return true
1969 }
1970 break
1971 }
1972
1973
1974 for {
1975 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1976 x := v_0
1977 if v_1.Op != OpAMD64NEGQ {
1978 continue
1979 }
1980 y := v_1.Args[0]
1981 v.reset(OpAMD64SUBQ)
1982 v.AddArg2(x, y)
1983 return true
1984 }
1985 break
1986 }
1987
1988
1989
1990 for {
1991 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1992 x := v_0
1993 l := v_1
1994 if l.Op != OpAMD64MOVQload {
1995 continue
1996 }
1997 off := auxIntToInt32(l.AuxInt)
1998 sym := auxToSym(l.Aux)
1999 mem := l.Args[1]
2000 ptr := l.Args[0]
2001 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2002 continue
2003 }
2004 v.reset(OpAMD64ADDQload)
2005 v.AuxInt = int32ToAuxInt(off)
2006 v.Aux = symToAux(sym)
2007 v.AddArg3(x, ptr, mem)
2008 return true
2009 }
2010 break
2011 }
2012 return false
2013 }
2014 func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
2015 v_1 := v.Args[1]
2016 v_0 := v.Args[0]
2017
2018
2019
2020 for {
2021 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2022 x := v_0
2023 if v_1.Op != OpAMD64MOVQconst {
2024 continue
2025 }
2026 c := auxIntToInt64(v_1.AuxInt)
2027 if !(is32Bit(c)) {
2028 continue
2029 }
2030 v.reset(OpAMD64ADDQconstcarry)
2031 v.AuxInt = int32ToAuxInt(int32(c))
2032 v.AddArg(x)
2033 return true
2034 }
2035 break
2036 }
2037 return false
2038 }
2039 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
2040 v_0 := v.Args[0]
2041
2042
2043 for {
2044 c := auxIntToInt32(v.AuxInt)
2045 if v_0.Op != OpAMD64ADDQ {
2046 break
2047 }
2048 y := v_0.Args[1]
2049 x := v_0.Args[0]
2050 v.reset(OpAMD64LEAQ1)
2051 v.AuxInt = int32ToAuxInt(c)
2052 v.AddArg2(x, y)
2053 return true
2054 }
2055
2056
2057 for {
2058 c := auxIntToInt32(v.AuxInt)
2059 if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
2060 break
2061 }
2062 x := v_0.Args[0]
2063 v.reset(OpAMD64LEAQ1)
2064 v.AuxInt = int32ToAuxInt(c)
2065 v.AddArg2(x, x)
2066 return true
2067 }
2068
2069
2070
2071 for {
2072 c := auxIntToInt32(v.AuxInt)
2073 if v_0.Op != OpAMD64LEAQ {
2074 break
2075 }
2076 d := auxIntToInt32(v_0.AuxInt)
2077 s := auxToSym(v_0.Aux)
2078 x := v_0.Args[0]
2079 if !(is32Bit(int64(c) + int64(d))) {
2080 break
2081 }
2082 v.reset(OpAMD64LEAQ)
2083 v.AuxInt = int32ToAuxInt(c + d)
2084 v.Aux = symToAux(s)
2085 v.AddArg(x)
2086 return true
2087 }
2088
2089
2090
2091 for {
2092 c := auxIntToInt32(v.AuxInt)
2093 if v_0.Op != OpAMD64LEAQ1 {
2094 break
2095 }
2096 d := auxIntToInt32(v_0.AuxInt)
2097 s := auxToSym(v_0.Aux)
2098 y := v_0.Args[1]
2099 x := v_0.Args[0]
2100 if !(is32Bit(int64(c) + int64(d))) {
2101 break
2102 }
2103 v.reset(OpAMD64LEAQ1)
2104 v.AuxInt = int32ToAuxInt(c + d)
2105 v.Aux = symToAux(s)
2106 v.AddArg2(x, y)
2107 return true
2108 }
2109
2110
2111
2112 for {
2113 c := auxIntToInt32(v.AuxInt)
2114 if v_0.Op != OpAMD64LEAQ2 {
2115 break
2116 }
2117 d := auxIntToInt32(v_0.AuxInt)
2118 s := auxToSym(v_0.Aux)
2119 y := v_0.Args[1]
2120 x := v_0.Args[0]
2121 if !(is32Bit(int64(c) + int64(d))) {
2122 break
2123 }
2124 v.reset(OpAMD64LEAQ2)
2125 v.AuxInt = int32ToAuxInt(c + d)
2126 v.Aux = symToAux(s)
2127 v.AddArg2(x, y)
2128 return true
2129 }
2130
2131
2132
2133 for {
2134 c := auxIntToInt32(v.AuxInt)
2135 if v_0.Op != OpAMD64LEAQ4 {
2136 break
2137 }
2138 d := auxIntToInt32(v_0.AuxInt)
2139 s := auxToSym(v_0.Aux)
2140 y := v_0.Args[1]
2141 x := v_0.Args[0]
2142 if !(is32Bit(int64(c) + int64(d))) {
2143 break
2144 }
2145 v.reset(OpAMD64LEAQ4)
2146 v.AuxInt = int32ToAuxInt(c + d)
2147 v.Aux = symToAux(s)
2148 v.AddArg2(x, y)
2149 return true
2150 }
2151
2152
2153
2154 for {
2155 c := auxIntToInt32(v.AuxInt)
2156 if v_0.Op != OpAMD64LEAQ8 {
2157 break
2158 }
2159 d := auxIntToInt32(v_0.AuxInt)
2160 s := auxToSym(v_0.Aux)
2161 y := v_0.Args[1]
2162 x := v_0.Args[0]
2163 if !(is32Bit(int64(c) + int64(d))) {
2164 break
2165 }
2166 v.reset(OpAMD64LEAQ8)
2167 v.AuxInt = int32ToAuxInt(c + d)
2168 v.Aux = symToAux(s)
2169 v.AddArg2(x, y)
2170 return true
2171 }
2172
2173
2174 for {
2175 if auxIntToInt32(v.AuxInt) != 0 {
2176 break
2177 }
2178 x := v_0
2179 v.copyOf(x)
2180 return true
2181 }
2182
2183
2184 for {
2185 c := auxIntToInt32(v.AuxInt)
2186 if v_0.Op != OpAMD64MOVQconst {
2187 break
2188 }
2189 d := auxIntToInt64(v_0.AuxInt)
2190 v.reset(OpAMD64MOVQconst)
2191 v.AuxInt = int64ToAuxInt(int64(c) + d)
2192 return true
2193 }
2194
2195
2196
2197 for {
2198 c := auxIntToInt32(v.AuxInt)
2199 if v_0.Op != OpAMD64ADDQconst {
2200 break
2201 }
2202 d := auxIntToInt32(v_0.AuxInt)
2203 x := v_0.Args[0]
2204 if !(is32Bit(int64(c) + int64(d))) {
2205 break
2206 }
2207 v.reset(OpAMD64ADDQconst)
2208 v.AuxInt = int32ToAuxInt(c + d)
2209 v.AddArg(x)
2210 return true
2211 }
2212
2213
2214 for {
2215 off := auxIntToInt32(v.AuxInt)
2216 x := v_0
2217 if x.Op != OpSP {
2218 break
2219 }
2220 v.reset(OpAMD64LEAQ)
2221 v.AuxInt = int32ToAuxInt(off)
2222 v.AddArg(x)
2223 return true
2224 }
2225 return false
2226 }
2227 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2228 v_1 := v.Args[1]
2229 v_0 := v.Args[0]
2230
2231
2232
2233 for {
2234 valoff1 := auxIntToValAndOff(v.AuxInt)
2235 sym := auxToSym(v.Aux)
2236 if v_0.Op != OpAMD64ADDQconst {
2237 break
2238 }
2239 off2 := auxIntToInt32(v_0.AuxInt)
2240 base := v_0.Args[0]
2241 mem := v_1
2242 if !(ValAndOff(valoff1).canAdd32(off2)) {
2243 break
2244 }
2245 v.reset(OpAMD64ADDQconstmodify)
2246 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2247 v.Aux = symToAux(sym)
2248 v.AddArg2(base, mem)
2249 return true
2250 }
2251
2252
2253
2254 for {
2255 valoff1 := auxIntToValAndOff(v.AuxInt)
2256 sym1 := auxToSym(v.Aux)
2257 if v_0.Op != OpAMD64LEAQ {
2258 break
2259 }
2260 off2 := auxIntToInt32(v_0.AuxInt)
2261 sym2 := auxToSym(v_0.Aux)
2262 base := v_0.Args[0]
2263 mem := v_1
2264 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2265 break
2266 }
2267 v.reset(OpAMD64ADDQconstmodify)
2268 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2269 v.Aux = symToAux(mergeSym(sym1, sym2))
2270 v.AddArg2(base, mem)
2271 return true
2272 }
2273 return false
2274 }
2275 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2276 v_2 := v.Args[2]
2277 v_1 := v.Args[1]
2278 v_0 := v.Args[0]
2279 b := v.Block
2280 typ := &b.Func.Config.Types
2281
2282
2283
2284 for {
2285 off1 := auxIntToInt32(v.AuxInt)
2286 sym := auxToSym(v.Aux)
2287 val := v_0
2288 if v_1.Op != OpAMD64ADDQconst {
2289 break
2290 }
2291 off2 := auxIntToInt32(v_1.AuxInt)
2292 base := v_1.Args[0]
2293 mem := v_2
2294 if !(is32Bit(int64(off1) + int64(off2))) {
2295 break
2296 }
2297 v.reset(OpAMD64ADDQload)
2298 v.AuxInt = int32ToAuxInt(off1 + off2)
2299 v.Aux = symToAux(sym)
2300 v.AddArg3(val, base, mem)
2301 return true
2302 }
2303
2304
2305
2306 for {
2307 off1 := auxIntToInt32(v.AuxInt)
2308 sym1 := auxToSym(v.Aux)
2309 val := v_0
2310 if v_1.Op != OpAMD64LEAQ {
2311 break
2312 }
2313 off2 := auxIntToInt32(v_1.AuxInt)
2314 sym2 := auxToSym(v_1.Aux)
2315 base := v_1.Args[0]
2316 mem := v_2
2317 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2318 break
2319 }
2320 v.reset(OpAMD64ADDQload)
2321 v.AuxInt = int32ToAuxInt(off1 + off2)
2322 v.Aux = symToAux(mergeSym(sym1, sym2))
2323 v.AddArg3(val, base, mem)
2324 return true
2325 }
2326
2327
2328 for {
2329 off := auxIntToInt32(v.AuxInt)
2330 sym := auxToSym(v.Aux)
2331 x := v_0
2332 ptr := v_1
2333 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2334 break
2335 }
2336 y := v_2.Args[1]
2337 if ptr != v_2.Args[0] {
2338 break
2339 }
2340 v.reset(OpAMD64ADDQ)
2341 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2342 v0.AddArg(y)
2343 v.AddArg2(x, v0)
2344 return true
2345 }
2346 return false
2347 }
2348 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2349 v_2 := v.Args[2]
2350 v_1 := v.Args[1]
2351 v_0 := v.Args[0]
2352
2353
2354
2355 for {
2356 off1 := auxIntToInt32(v.AuxInt)
2357 sym := auxToSym(v.Aux)
2358 if v_0.Op != OpAMD64ADDQconst {
2359 break
2360 }
2361 off2 := auxIntToInt32(v_0.AuxInt)
2362 base := v_0.Args[0]
2363 val := v_1
2364 mem := v_2
2365 if !(is32Bit(int64(off1) + int64(off2))) {
2366 break
2367 }
2368 v.reset(OpAMD64ADDQmodify)
2369 v.AuxInt = int32ToAuxInt(off1 + off2)
2370 v.Aux = symToAux(sym)
2371 v.AddArg3(base, val, mem)
2372 return true
2373 }
2374
2375
2376
2377 for {
2378 off1 := auxIntToInt32(v.AuxInt)
2379 sym1 := auxToSym(v.Aux)
2380 if v_0.Op != OpAMD64LEAQ {
2381 break
2382 }
2383 off2 := auxIntToInt32(v_0.AuxInt)
2384 sym2 := auxToSym(v_0.Aux)
2385 base := v_0.Args[0]
2386 val := v_1
2387 mem := v_2
2388 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2389 break
2390 }
2391 v.reset(OpAMD64ADDQmodify)
2392 v.AuxInt = int32ToAuxInt(off1 + off2)
2393 v.Aux = symToAux(mergeSym(sym1, sym2))
2394 v.AddArg3(base, val, mem)
2395 return true
2396 }
2397 return false
2398 }
2399 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2400 v_1 := v.Args[1]
2401 v_0 := v.Args[0]
2402
2403
2404
2405 for {
2406 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2407 x := v_0
2408 l := v_1
2409 if l.Op != OpAMD64MOVSDload {
2410 continue
2411 }
2412 off := auxIntToInt32(l.AuxInt)
2413 sym := auxToSym(l.Aux)
2414 mem := l.Args[1]
2415 ptr := l.Args[0]
2416 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2417 continue
2418 }
2419 v.reset(OpAMD64ADDSDload)
2420 v.AuxInt = int32ToAuxInt(off)
2421 v.Aux = symToAux(sym)
2422 v.AddArg3(x, ptr, mem)
2423 return true
2424 }
2425 break
2426 }
2427 return false
2428 }
2429 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2430 v_2 := v.Args[2]
2431 v_1 := v.Args[1]
2432 v_0 := v.Args[0]
2433 b := v.Block
2434 typ := &b.Func.Config.Types
2435
2436
2437
2438 for {
2439 off1 := auxIntToInt32(v.AuxInt)
2440 sym := auxToSym(v.Aux)
2441 val := v_0
2442 if v_1.Op != OpAMD64ADDQconst {
2443 break
2444 }
2445 off2 := auxIntToInt32(v_1.AuxInt)
2446 base := v_1.Args[0]
2447 mem := v_2
2448 if !(is32Bit(int64(off1) + int64(off2))) {
2449 break
2450 }
2451 v.reset(OpAMD64ADDSDload)
2452 v.AuxInt = int32ToAuxInt(off1 + off2)
2453 v.Aux = symToAux(sym)
2454 v.AddArg3(val, base, mem)
2455 return true
2456 }
2457
2458
2459
2460 for {
2461 off1 := auxIntToInt32(v.AuxInt)
2462 sym1 := auxToSym(v.Aux)
2463 val := v_0
2464 if v_1.Op != OpAMD64LEAQ {
2465 break
2466 }
2467 off2 := auxIntToInt32(v_1.AuxInt)
2468 sym2 := auxToSym(v_1.Aux)
2469 base := v_1.Args[0]
2470 mem := v_2
2471 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2472 break
2473 }
2474 v.reset(OpAMD64ADDSDload)
2475 v.AuxInt = int32ToAuxInt(off1 + off2)
2476 v.Aux = symToAux(mergeSym(sym1, sym2))
2477 v.AddArg3(val, base, mem)
2478 return true
2479 }
2480
2481
2482 for {
2483 off := auxIntToInt32(v.AuxInt)
2484 sym := auxToSym(v.Aux)
2485 x := v_0
2486 ptr := v_1
2487 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2488 break
2489 }
2490 y := v_2.Args[1]
2491 if ptr != v_2.Args[0] {
2492 break
2493 }
2494 v.reset(OpAMD64ADDSD)
2495 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2496 v0.AddArg(y)
2497 v.AddArg2(x, v0)
2498 return true
2499 }
2500 return false
2501 }
2502 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2503 v_1 := v.Args[1]
2504 v_0 := v.Args[0]
2505
2506
2507
2508 for {
2509 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2510 x := v_0
2511 l := v_1
2512 if l.Op != OpAMD64MOVSSload {
2513 continue
2514 }
2515 off := auxIntToInt32(l.AuxInt)
2516 sym := auxToSym(l.Aux)
2517 mem := l.Args[1]
2518 ptr := l.Args[0]
2519 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2520 continue
2521 }
2522 v.reset(OpAMD64ADDSSload)
2523 v.AuxInt = int32ToAuxInt(off)
2524 v.Aux = symToAux(sym)
2525 v.AddArg3(x, ptr, mem)
2526 return true
2527 }
2528 break
2529 }
2530 return false
2531 }
2532 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2533 v_2 := v.Args[2]
2534 v_1 := v.Args[1]
2535 v_0 := v.Args[0]
2536 b := v.Block
2537 typ := &b.Func.Config.Types
2538
2539
2540
2541 for {
2542 off1 := auxIntToInt32(v.AuxInt)
2543 sym := auxToSym(v.Aux)
2544 val := v_0
2545 if v_1.Op != OpAMD64ADDQconst {
2546 break
2547 }
2548 off2 := auxIntToInt32(v_1.AuxInt)
2549 base := v_1.Args[0]
2550 mem := v_2
2551 if !(is32Bit(int64(off1) + int64(off2))) {
2552 break
2553 }
2554 v.reset(OpAMD64ADDSSload)
2555 v.AuxInt = int32ToAuxInt(off1 + off2)
2556 v.Aux = symToAux(sym)
2557 v.AddArg3(val, base, mem)
2558 return true
2559 }
2560
2561
2562
2563 for {
2564 off1 := auxIntToInt32(v.AuxInt)
2565 sym1 := auxToSym(v.Aux)
2566 val := v_0
2567 if v_1.Op != OpAMD64LEAQ {
2568 break
2569 }
2570 off2 := auxIntToInt32(v_1.AuxInt)
2571 sym2 := auxToSym(v_1.Aux)
2572 base := v_1.Args[0]
2573 mem := v_2
2574 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2575 break
2576 }
2577 v.reset(OpAMD64ADDSSload)
2578 v.AuxInt = int32ToAuxInt(off1 + off2)
2579 v.Aux = symToAux(mergeSym(sym1, sym2))
2580 v.AddArg3(val, base, mem)
2581 return true
2582 }
2583
2584
2585 for {
2586 off := auxIntToInt32(v.AuxInt)
2587 sym := auxToSym(v.Aux)
2588 x := v_0
2589 ptr := v_1
2590 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2591 break
2592 }
2593 y := v_2.Args[1]
2594 if ptr != v_2.Args[0] {
2595 break
2596 }
2597 v.reset(OpAMD64ADDSS)
2598 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2599 v0.AddArg(y)
2600 v.AddArg2(x, v0)
2601 return true
2602 }
2603 return false
2604 }
2605 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2606 v_1 := v.Args[1]
2607 v_0 := v.Args[0]
2608 b := v.Block
2609 typ := &b.Func.Config.Types
2610
2611
2612 for {
2613 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2614 if v_0.Op != OpAMD64NOTL {
2615 continue
2616 }
2617 v_0_0 := v_0.Args[0]
2618 if v_0_0.Op != OpAMD64SHLL {
2619 continue
2620 }
2621 y := v_0_0.Args[1]
2622 v_0_0_0 := v_0_0.Args[0]
2623 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2624 continue
2625 }
2626 x := v_1
2627 v.reset(OpAMD64BTRL)
2628 v.AddArg2(x, y)
2629 return true
2630 }
2631 break
2632 }
2633
2634
2635 for {
2636 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2637 x := v_0
2638 if v_1.Op != OpAMD64MOVLconst {
2639 continue
2640 }
2641 c := auxIntToInt32(v_1.AuxInt)
2642 v.reset(OpAMD64ANDLconst)
2643 v.AuxInt = int32ToAuxInt(c)
2644 v.AddArg(x)
2645 return true
2646 }
2647 break
2648 }
2649
2650
2651 for {
2652 x := v_0
2653 if x != v_1 {
2654 break
2655 }
2656 v.copyOf(x)
2657 return true
2658 }
2659
2660
2661
2662 for {
2663 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2664 x := v_0
2665 l := v_1
2666 if l.Op != OpAMD64MOVLload {
2667 continue
2668 }
2669 off := auxIntToInt32(l.AuxInt)
2670 sym := auxToSym(l.Aux)
2671 mem := l.Args[1]
2672 ptr := l.Args[0]
2673 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2674 continue
2675 }
2676 v.reset(OpAMD64ANDLload)
2677 v.AuxInt = int32ToAuxInt(off)
2678 v.Aux = symToAux(sym)
2679 v.AddArg3(x, ptr, mem)
2680 return true
2681 }
2682 break
2683 }
2684
2685
2686
2687 for {
2688 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2689 x := v_0
2690 if v_1.Op != OpAMD64NOTL {
2691 continue
2692 }
2693 y := v_1.Args[0]
2694 if !(buildcfg.GOAMD64 >= 3) {
2695 continue
2696 }
2697 v.reset(OpAMD64ANDNL)
2698 v.AddArg2(x, y)
2699 return true
2700 }
2701 break
2702 }
2703
2704
2705
2706 for {
2707 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2708 x := v_0
2709 if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2710 continue
2711 }
2712 v.reset(OpAMD64BLSIL)
2713 v.AddArg(x)
2714 return true
2715 }
2716 break
2717 }
2718
2719
2720
2721 for {
2722 t := v.Type
2723 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2724 x := v_0
2725 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2726 continue
2727 }
2728 v.reset(OpSelect0)
2729 v.Type = t
2730 v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
2731 v0.AddArg(x)
2732 v.AddArg(v0)
2733 return true
2734 }
2735 break
2736 }
2737 return false
2738 }
2739 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2740 v_0 := v.Args[0]
2741
2742
2743 for {
2744 c := auxIntToInt32(v.AuxInt)
2745 if v_0.Op != OpAMD64ANDLconst {
2746 break
2747 }
2748 d := auxIntToInt32(v_0.AuxInt)
2749 x := v_0.Args[0]
2750 v.reset(OpAMD64ANDLconst)
2751 v.AuxInt = int32ToAuxInt(c & d)
2752 v.AddArg(x)
2753 return true
2754 }
2755
2756
2757 for {
2758 if auxIntToInt32(v.AuxInt) != 0xFF {
2759 break
2760 }
2761 x := v_0
2762 v.reset(OpAMD64MOVBQZX)
2763 v.AddArg(x)
2764 return true
2765 }
2766
2767
2768 for {
2769 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2770 break
2771 }
2772 x := v_0
2773 v.reset(OpAMD64MOVWQZX)
2774 v.AddArg(x)
2775 return true
2776 }
2777
2778
2779
2780 for {
2781 c := auxIntToInt32(v.AuxInt)
2782 if !(c == 0) {
2783 break
2784 }
2785 v.reset(OpAMD64MOVLconst)
2786 v.AuxInt = int32ToAuxInt(0)
2787 return true
2788 }
2789
2790
2791
2792 for {
2793 c := auxIntToInt32(v.AuxInt)
2794 x := v_0
2795 if !(c == -1) {
2796 break
2797 }
2798 v.copyOf(x)
2799 return true
2800 }
2801
2802
2803 for {
2804 c := auxIntToInt32(v.AuxInt)
2805 if v_0.Op != OpAMD64MOVLconst {
2806 break
2807 }
2808 d := auxIntToInt32(v_0.AuxInt)
2809 v.reset(OpAMD64MOVLconst)
2810 v.AuxInt = int32ToAuxInt(c & d)
2811 return true
2812 }
2813 return false
2814 }
2815 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2816 v_1 := v.Args[1]
2817 v_0 := v.Args[0]
2818
2819
2820
2821 for {
2822 valoff1 := auxIntToValAndOff(v.AuxInt)
2823 sym := auxToSym(v.Aux)
2824 if v_0.Op != OpAMD64ADDQconst {
2825 break
2826 }
2827 off2 := auxIntToInt32(v_0.AuxInt)
2828 base := v_0.Args[0]
2829 mem := v_1
2830 if !(ValAndOff(valoff1).canAdd32(off2)) {
2831 break
2832 }
2833 v.reset(OpAMD64ANDLconstmodify)
2834 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2835 v.Aux = symToAux(sym)
2836 v.AddArg2(base, mem)
2837 return true
2838 }
2839
2840
2841
2842 for {
2843 valoff1 := auxIntToValAndOff(v.AuxInt)
2844 sym1 := auxToSym(v.Aux)
2845 if v_0.Op != OpAMD64LEAQ {
2846 break
2847 }
2848 off2 := auxIntToInt32(v_0.AuxInt)
2849 sym2 := auxToSym(v_0.Aux)
2850 base := v_0.Args[0]
2851 mem := v_1
2852 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2853 break
2854 }
2855 v.reset(OpAMD64ANDLconstmodify)
2856 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2857 v.Aux = symToAux(mergeSym(sym1, sym2))
2858 v.AddArg2(base, mem)
2859 return true
2860 }
2861 return false
2862 }
2863 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2864 v_2 := v.Args[2]
2865 v_1 := v.Args[1]
2866 v_0 := v.Args[0]
2867 b := v.Block
2868 typ := &b.Func.Config.Types
2869
2870
2871
2872 for {
2873 off1 := auxIntToInt32(v.AuxInt)
2874 sym := auxToSym(v.Aux)
2875 val := v_0
2876 if v_1.Op != OpAMD64ADDQconst {
2877 break
2878 }
2879 off2 := auxIntToInt32(v_1.AuxInt)
2880 base := v_1.Args[0]
2881 mem := v_2
2882 if !(is32Bit(int64(off1) + int64(off2))) {
2883 break
2884 }
2885 v.reset(OpAMD64ANDLload)
2886 v.AuxInt = int32ToAuxInt(off1 + off2)
2887 v.Aux = symToAux(sym)
2888 v.AddArg3(val, base, mem)
2889 return true
2890 }
2891
2892
2893
2894 for {
2895 off1 := auxIntToInt32(v.AuxInt)
2896 sym1 := auxToSym(v.Aux)
2897 val := v_0
2898 if v_1.Op != OpAMD64LEAQ {
2899 break
2900 }
2901 off2 := auxIntToInt32(v_1.AuxInt)
2902 sym2 := auxToSym(v_1.Aux)
2903 base := v_1.Args[0]
2904 mem := v_2
2905 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2906 break
2907 }
2908 v.reset(OpAMD64ANDLload)
2909 v.AuxInt = int32ToAuxInt(off1 + off2)
2910 v.Aux = symToAux(mergeSym(sym1, sym2))
2911 v.AddArg3(val, base, mem)
2912 return true
2913 }
2914
2915
2916 for {
2917 off := auxIntToInt32(v.AuxInt)
2918 sym := auxToSym(v.Aux)
2919 x := v_0
2920 ptr := v_1
2921 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2922 break
2923 }
2924 y := v_2.Args[1]
2925 if ptr != v_2.Args[0] {
2926 break
2927 }
2928 v.reset(OpAMD64ANDL)
2929 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
2930 v0.AddArg(y)
2931 v.AddArg2(x, v0)
2932 return true
2933 }
2934 return false
2935 }
2936 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
2937 v_2 := v.Args[2]
2938 v_1 := v.Args[1]
2939 v_0 := v.Args[0]
2940
2941
2942
2943 for {
2944 off1 := auxIntToInt32(v.AuxInt)
2945 sym := auxToSym(v.Aux)
2946 if v_0.Op != OpAMD64ADDQconst {
2947 break
2948 }
2949 off2 := auxIntToInt32(v_0.AuxInt)
2950 base := v_0.Args[0]
2951 val := v_1
2952 mem := v_2
2953 if !(is32Bit(int64(off1) + int64(off2))) {
2954 break
2955 }
2956 v.reset(OpAMD64ANDLmodify)
2957 v.AuxInt = int32ToAuxInt(off1 + off2)
2958 v.Aux = symToAux(sym)
2959 v.AddArg3(base, val, mem)
2960 return true
2961 }
2962
2963
2964
2965 for {
2966 off1 := auxIntToInt32(v.AuxInt)
2967 sym1 := auxToSym(v.Aux)
2968 if v_0.Op != OpAMD64LEAQ {
2969 break
2970 }
2971 off2 := auxIntToInt32(v_0.AuxInt)
2972 sym2 := auxToSym(v_0.Aux)
2973 base := v_0.Args[0]
2974 val := v_1
2975 mem := v_2
2976 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2977 break
2978 }
2979 v.reset(OpAMD64ANDLmodify)
2980 v.AuxInt = int32ToAuxInt(off1 + off2)
2981 v.Aux = symToAux(mergeSym(sym1, sym2))
2982 v.AddArg3(base, val, mem)
2983 return true
2984 }
2985 return false
2986 }
2987 func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
2988 v_1 := v.Args[1]
2989 v_0 := v.Args[0]
2990
2991
2992 for {
2993 x := v_0
2994 if v_1.Op != OpAMD64SHLL {
2995 break
2996 }
2997 y := v_1.Args[1]
2998 v_1_0 := v_1.Args[0]
2999 if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
3000 break
3001 }
3002 v.reset(OpAMD64BTRL)
3003 v.AddArg2(x, y)
3004 return true
3005 }
3006 return false
3007 }
3008 func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
3009 v_1 := v.Args[1]
3010 v_0 := v.Args[0]
3011
3012
3013 for {
3014 x := v_0
3015 if v_1.Op != OpAMD64SHLQ {
3016 break
3017 }
3018 y := v_1.Args[1]
3019 v_1_0 := v_1.Args[0]
3020 if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
3021 break
3022 }
3023 v.reset(OpAMD64BTRQ)
3024 v.AddArg2(x, y)
3025 return true
3026 }
3027 return false
3028 }
3029 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3030 v_1 := v.Args[1]
3031 v_0 := v.Args[0]
3032 b := v.Block
3033 typ := &b.Func.Config.Types
3034
3035
3036 for {
3037 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3038 if v_0.Op != OpAMD64NOTQ {
3039 continue
3040 }
3041 v_0_0 := v_0.Args[0]
3042 if v_0_0.Op != OpAMD64SHLQ {
3043 continue
3044 }
3045 y := v_0_0.Args[1]
3046 v_0_0_0 := v_0_0.Args[0]
3047 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3048 continue
3049 }
3050 x := v_1
3051 v.reset(OpAMD64BTRQ)
3052 v.AddArg2(x, y)
3053 return true
3054 }
3055 break
3056 }
3057
3058
3059
3060 for {
3061 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3062 if v_0.Op != OpAMD64MOVQconst {
3063 continue
3064 }
3065 c := auxIntToInt64(v_0.AuxInt)
3066 x := v_1
3067 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
3068 continue
3069 }
3070 v.reset(OpAMD64BTRQconst)
3071 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3072 v.AddArg(x)
3073 return true
3074 }
3075 break
3076 }
3077
3078
3079
3080 for {
3081 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3082 x := v_0
3083 if v_1.Op != OpAMD64MOVQconst {
3084 continue
3085 }
3086 c := auxIntToInt64(v_1.AuxInt)
3087 if !(is32Bit(c)) {
3088 continue
3089 }
3090 v.reset(OpAMD64ANDQconst)
3091 v.AuxInt = int32ToAuxInt(int32(c))
3092 v.AddArg(x)
3093 return true
3094 }
3095 break
3096 }
3097
3098
3099 for {
3100 x := v_0
3101 if x != v_1 {
3102 break
3103 }
3104 v.copyOf(x)
3105 return true
3106 }
3107
3108
3109
3110 for {
3111 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3112 x := v_0
3113 l := v_1
3114 if l.Op != OpAMD64MOVQload {
3115 continue
3116 }
3117 off := auxIntToInt32(l.AuxInt)
3118 sym := auxToSym(l.Aux)
3119 mem := l.Args[1]
3120 ptr := l.Args[0]
3121 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3122 continue
3123 }
3124 v.reset(OpAMD64ANDQload)
3125 v.AuxInt = int32ToAuxInt(off)
3126 v.Aux = symToAux(sym)
3127 v.AddArg3(x, ptr, mem)
3128 return true
3129 }
3130 break
3131 }
3132
3133
3134
3135 for {
3136 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3137 x := v_0
3138 if v_1.Op != OpAMD64NOTQ {
3139 continue
3140 }
3141 y := v_1.Args[0]
3142 if !(buildcfg.GOAMD64 >= 3) {
3143 continue
3144 }
3145 v.reset(OpAMD64ANDNQ)
3146 v.AddArg2(x, y)
3147 return true
3148 }
3149 break
3150 }
3151
3152
3153
3154 for {
3155 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3156 x := v_0
3157 if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3158 continue
3159 }
3160 v.reset(OpAMD64BLSIQ)
3161 v.AddArg(x)
3162 return true
3163 }
3164 break
3165 }
3166
3167
3168
3169 for {
3170 t := v.Type
3171 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3172 x := v_0
3173 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3174 continue
3175 }
3176 v.reset(OpSelect0)
3177 v.Type = t
3178 v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
3179 v0.AddArg(x)
3180 v.AddArg(v0)
3181 return true
3182 }
3183 break
3184 }
3185 return false
3186 }
3187 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3188 v_0 := v.Args[0]
3189
3190
3191 for {
3192 c := auxIntToInt32(v.AuxInt)
3193 if v_0.Op != OpAMD64ANDQconst {
3194 break
3195 }
3196 d := auxIntToInt32(v_0.AuxInt)
3197 x := v_0.Args[0]
3198 v.reset(OpAMD64ANDQconst)
3199 v.AuxInt = int32ToAuxInt(c & d)
3200 v.AddArg(x)
3201 return true
3202 }
3203
3204
3205 for {
3206 if auxIntToInt32(v.AuxInt) != 0xFF {
3207 break
3208 }
3209 x := v_0
3210 v.reset(OpAMD64MOVBQZX)
3211 v.AddArg(x)
3212 return true
3213 }
3214
3215
3216 for {
3217 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3218 break
3219 }
3220 x := v_0
3221 v.reset(OpAMD64MOVWQZX)
3222 v.AddArg(x)
3223 return true
3224 }
3225
3226
3227 for {
3228 if auxIntToInt32(v.AuxInt) != 0 {
3229 break
3230 }
3231 v.reset(OpAMD64MOVQconst)
3232 v.AuxInt = int64ToAuxInt(0)
3233 return true
3234 }
3235
3236
3237 for {
3238 if auxIntToInt32(v.AuxInt) != -1 {
3239 break
3240 }
3241 x := v_0
3242 v.copyOf(x)
3243 return true
3244 }
3245
3246
3247 for {
3248 c := auxIntToInt32(v.AuxInt)
3249 if v_0.Op != OpAMD64MOVQconst {
3250 break
3251 }
3252 d := auxIntToInt64(v_0.AuxInt)
3253 v.reset(OpAMD64MOVQconst)
3254 v.AuxInt = int64ToAuxInt(int64(c) & d)
3255 return true
3256 }
3257 return false
3258 }
3259 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3260 v_1 := v.Args[1]
3261 v_0 := v.Args[0]
3262
3263
3264
3265 for {
3266 valoff1 := auxIntToValAndOff(v.AuxInt)
3267 sym := auxToSym(v.Aux)
3268 if v_0.Op != OpAMD64ADDQconst {
3269 break
3270 }
3271 off2 := auxIntToInt32(v_0.AuxInt)
3272 base := v_0.Args[0]
3273 mem := v_1
3274 if !(ValAndOff(valoff1).canAdd32(off2)) {
3275 break
3276 }
3277 v.reset(OpAMD64ANDQconstmodify)
3278 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3279 v.Aux = symToAux(sym)
3280 v.AddArg2(base, mem)
3281 return true
3282 }
3283
3284
3285
3286 for {
3287 valoff1 := auxIntToValAndOff(v.AuxInt)
3288 sym1 := auxToSym(v.Aux)
3289 if v_0.Op != OpAMD64LEAQ {
3290 break
3291 }
3292 off2 := auxIntToInt32(v_0.AuxInt)
3293 sym2 := auxToSym(v_0.Aux)
3294 base := v_0.Args[0]
3295 mem := v_1
3296 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3297 break
3298 }
3299 v.reset(OpAMD64ANDQconstmodify)
3300 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3301 v.Aux = symToAux(mergeSym(sym1, sym2))
3302 v.AddArg2(base, mem)
3303 return true
3304 }
3305 return false
3306 }
3307 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3308 v_2 := v.Args[2]
3309 v_1 := v.Args[1]
3310 v_0 := v.Args[0]
3311 b := v.Block
3312 typ := &b.Func.Config.Types
3313
3314
3315
3316 for {
3317 off1 := auxIntToInt32(v.AuxInt)
3318 sym := auxToSym(v.Aux)
3319 val := v_0
3320 if v_1.Op != OpAMD64ADDQconst {
3321 break
3322 }
3323 off2 := auxIntToInt32(v_1.AuxInt)
3324 base := v_1.Args[0]
3325 mem := v_2
3326 if !(is32Bit(int64(off1) + int64(off2))) {
3327 break
3328 }
3329 v.reset(OpAMD64ANDQload)
3330 v.AuxInt = int32ToAuxInt(off1 + off2)
3331 v.Aux = symToAux(sym)
3332 v.AddArg3(val, base, mem)
3333 return true
3334 }
3335
3336
3337
3338 for {
3339 off1 := auxIntToInt32(v.AuxInt)
3340 sym1 := auxToSym(v.Aux)
3341 val := v_0
3342 if v_1.Op != OpAMD64LEAQ {
3343 break
3344 }
3345 off2 := auxIntToInt32(v_1.AuxInt)
3346 sym2 := auxToSym(v_1.Aux)
3347 base := v_1.Args[0]
3348 mem := v_2
3349 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3350 break
3351 }
3352 v.reset(OpAMD64ANDQload)
3353 v.AuxInt = int32ToAuxInt(off1 + off2)
3354 v.Aux = symToAux(mergeSym(sym1, sym2))
3355 v.AddArg3(val, base, mem)
3356 return true
3357 }
3358
3359
3360 for {
3361 off := auxIntToInt32(v.AuxInt)
3362 sym := auxToSym(v.Aux)
3363 x := v_0
3364 ptr := v_1
3365 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3366 break
3367 }
3368 y := v_2.Args[1]
3369 if ptr != v_2.Args[0] {
3370 break
3371 }
3372 v.reset(OpAMD64ANDQ)
3373 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3374 v0.AddArg(y)
3375 v.AddArg2(x, v0)
3376 return true
3377 }
3378 return false
3379 }
3380 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3381 v_2 := v.Args[2]
3382 v_1 := v.Args[1]
3383 v_0 := v.Args[0]
3384
3385
3386
3387 for {
3388 off1 := auxIntToInt32(v.AuxInt)
3389 sym := auxToSym(v.Aux)
3390 if v_0.Op != OpAMD64ADDQconst {
3391 break
3392 }
3393 off2 := auxIntToInt32(v_0.AuxInt)
3394 base := v_0.Args[0]
3395 val := v_1
3396 mem := v_2
3397 if !(is32Bit(int64(off1) + int64(off2))) {
3398 break
3399 }
3400 v.reset(OpAMD64ANDQmodify)
3401 v.AuxInt = int32ToAuxInt(off1 + off2)
3402 v.Aux = symToAux(sym)
3403 v.AddArg3(base, val, mem)
3404 return true
3405 }
3406
3407
3408
3409 for {
3410 off1 := auxIntToInt32(v.AuxInt)
3411 sym1 := auxToSym(v.Aux)
3412 if v_0.Op != OpAMD64LEAQ {
3413 break
3414 }
3415 off2 := auxIntToInt32(v_0.AuxInt)
3416 sym2 := auxToSym(v_0.Aux)
3417 base := v_0.Args[0]
3418 val := v_1
3419 mem := v_2
3420 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3421 break
3422 }
3423 v.reset(OpAMD64ANDQmodify)
3424 v.AuxInt = int32ToAuxInt(off1 + off2)
3425 v.Aux = symToAux(mergeSym(sym1, sym2))
3426 v.AddArg3(base, val, mem)
3427 return true
3428 }
3429 return false
3430 }
3431 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3432 v_0 := v.Args[0]
3433 b := v.Block
3434
3435
3436 for {
3437 if v_0.Op != OpAMD64ORQconst {
3438 break
3439 }
3440 t := v_0.Type
3441 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3442 break
3443 }
3444 v_0_0 := v_0.Args[0]
3445 if v_0_0.Op != OpAMD64MOVBQZX {
3446 break
3447 }
3448 x := v_0_0.Args[0]
3449 v.reset(OpAMD64BSFQ)
3450 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3451 v0.AuxInt = int32ToAuxInt(1 << 8)
3452 v0.AddArg(x)
3453 v.AddArg(v0)
3454 return true
3455 }
3456
3457
3458 for {
3459 if v_0.Op != OpAMD64ORQconst {
3460 break
3461 }
3462 t := v_0.Type
3463 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3464 break
3465 }
3466 v_0_0 := v_0.Args[0]
3467 if v_0_0.Op != OpAMD64MOVWQZX {
3468 break
3469 }
3470 x := v_0_0.Args[0]
3471 v.reset(OpAMD64BSFQ)
3472 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3473 v0.AuxInt = int32ToAuxInt(1 << 16)
3474 v0.AddArg(x)
3475 v.AddArg(v0)
3476 return true
3477 }
3478 return false
3479 }
3480 func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
3481 v_0 := v.Args[0]
3482 b := v.Block
3483 typ := &b.Func.Config.Types
3484
3485
3486 for {
3487 if v_0.Op != OpAMD64BSWAPL {
3488 break
3489 }
3490 p := v_0.Args[0]
3491 v.copyOf(p)
3492 return true
3493 }
3494
3495
3496
3497 for {
3498 x := v_0
3499 if x.Op != OpAMD64MOVLload {
3500 break
3501 }
3502 i := auxIntToInt32(x.AuxInt)
3503 s := auxToSym(x.Aux)
3504 mem := x.Args[1]
3505 p := x.Args[0]
3506 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3507 break
3508 }
3509 b = x.Block
3510 v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
3511 v.copyOf(v0)
3512 v0.AuxInt = int32ToAuxInt(i)
3513 v0.Aux = symToAux(s)
3514 v0.AddArg2(p, mem)
3515 return true
3516 }
3517
3518
3519
3520 for {
3521 x := v_0
3522 if x.Op != OpAMD64MOVBELload {
3523 break
3524 }
3525 i := auxIntToInt32(x.AuxInt)
3526 s := auxToSym(x.Aux)
3527 mem := x.Args[1]
3528 p := x.Args[0]
3529 if !(x.Uses == 1) {
3530 break
3531 }
3532 b = x.Block
3533 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
3534 v.copyOf(v0)
3535 v0.AuxInt = int32ToAuxInt(i)
3536 v0.Aux = symToAux(s)
3537 v0.AddArg2(p, mem)
3538 return true
3539 }
3540 return false
3541 }
3542 func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
3543 v_0 := v.Args[0]
3544 b := v.Block
3545 typ := &b.Func.Config.Types
3546
3547
3548 for {
3549 if v_0.Op != OpAMD64BSWAPQ {
3550 break
3551 }
3552 p := v_0.Args[0]
3553 v.copyOf(p)
3554 return true
3555 }
3556
3557
3558
3559 for {
3560 x := v_0
3561 if x.Op != OpAMD64MOVQload {
3562 break
3563 }
3564 i := auxIntToInt32(x.AuxInt)
3565 s := auxToSym(x.Aux)
3566 mem := x.Args[1]
3567 p := x.Args[0]
3568 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3569 break
3570 }
3571 b = x.Block
3572 v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
3573 v.copyOf(v0)
3574 v0.AuxInt = int32ToAuxInt(i)
3575 v0.Aux = symToAux(s)
3576 v0.AddArg2(p, mem)
3577 return true
3578 }
3579
3580
3581
3582 for {
3583 x := v_0
3584 if x.Op != OpAMD64MOVBEQload {
3585 break
3586 }
3587 i := auxIntToInt32(x.AuxInt)
3588 s := auxToSym(x.Aux)
3589 mem := x.Args[1]
3590 p := x.Args[0]
3591 if !(x.Uses == 1) {
3592 break
3593 }
3594 b = x.Block
3595 v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
3596 v.copyOf(v0)
3597 v0.AuxInt = int32ToAuxInt(i)
3598 v0.Aux = symToAux(s)
3599 v0.AddArg2(p, mem)
3600 return true
3601 }
3602 return false
3603 }
3604 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3605 v_0 := v.Args[0]
3606
3607
3608 for {
3609 c := auxIntToInt8(v.AuxInt)
3610 if v_0.Op != OpAMD64MOVQconst {
3611 break
3612 }
3613 d := auxIntToInt64(v_0.AuxInt)
3614 v.reset(OpAMD64MOVQconst)
3615 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3616 return true
3617 }
3618 return false
3619 }
3620 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3621 v_0 := v.Args[0]
3622
3623
3624
3625 for {
3626 c := auxIntToInt8(v.AuxInt)
3627 if v_0.Op != OpAMD64SHRQconst {
3628 break
3629 }
3630 d := auxIntToInt8(v_0.AuxInt)
3631 x := v_0.Args[0]
3632 if !((c + d) < 64) {
3633 break
3634 }
3635 v.reset(OpAMD64BTQconst)
3636 v.AuxInt = int8ToAuxInt(c + d)
3637 v.AddArg(x)
3638 return true
3639 }
3640
3641
3642
3643 for {
3644 c := auxIntToInt8(v.AuxInt)
3645 if v_0.Op != OpAMD64SHLQconst {
3646 break
3647 }
3648 d := auxIntToInt8(v_0.AuxInt)
3649 x := v_0.Args[0]
3650 if !(c > d) {
3651 break
3652 }
3653 v.reset(OpAMD64BTLconst)
3654 v.AuxInt = int8ToAuxInt(c - d)
3655 v.AddArg(x)
3656 return true
3657 }
3658
3659
3660 for {
3661 if auxIntToInt8(v.AuxInt) != 0 {
3662 break
3663 }
3664 s := v_0
3665 if s.Op != OpAMD64SHRQ {
3666 break
3667 }
3668 y := s.Args[1]
3669 x := s.Args[0]
3670 v.reset(OpAMD64BTQ)
3671 v.AddArg2(y, x)
3672 return true
3673 }
3674
3675
3676
3677 for {
3678 c := auxIntToInt8(v.AuxInt)
3679 if v_0.Op != OpAMD64SHRLconst {
3680 break
3681 }
3682 d := auxIntToInt8(v_0.AuxInt)
3683 x := v_0.Args[0]
3684 if !((c + d) < 32) {
3685 break
3686 }
3687 v.reset(OpAMD64BTLconst)
3688 v.AuxInt = int8ToAuxInt(c + d)
3689 v.AddArg(x)
3690 return true
3691 }
3692
3693
3694
3695 for {
3696 c := auxIntToInt8(v.AuxInt)
3697 if v_0.Op != OpAMD64SHLLconst {
3698 break
3699 }
3700 d := auxIntToInt8(v_0.AuxInt)
3701 x := v_0.Args[0]
3702 if !(c > d) {
3703 break
3704 }
3705 v.reset(OpAMD64BTLconst)
3706 v.AuxInt = int8ToAuxInt(c - d)
3707 v.AddArg(x)
3708 return true
3709 }
3710
3711
3712 for {
3713 if auxIntToInt8(v.AuxInt) != 0 {
3714 break
3715 }
3716 s := v_0
3717 if s.Op != OpAMD64SHRL {
3718 break
3719 }
3720 y := s.Args[1]
3721 x := s.Args[0]
3722 v.reset(OpAMD64BTL)
3723 v.AddArg2(y, x)
3724 return true
3725 }
3726
3727
3728 for {
3729 if auxIntToInt8(v.AuxInt) != 0 {
3730 break
3731 }
3732 s := v_0
3733 if s.Op != OpAMD64SHRXL {
3734 break
3735 }
3736 y := s.Args[1]
3737 x := s.Args[0]
3738 v.reset(OpAMD64BTL)
3739 v.AddArg2(y, x)
3740 return true
3741 }
3742 return false
3743 }
3744 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3745 v_0 := v.Args[0]
3746
3747
3748
3749 for {
3750 c := auxIntToInt8(v.AuxInt)
3751 if v_0.Op != OpAMD64SHRQconst {
3752 break
3753 }
3754 d := auxIntToInt8(v_0.AuxInt)
3755 x := v_0.Args[0]
3756 if !((c + d) < 64) {
3757 break
3758 }
3759 v.reset(OpAMD64BTQconst)
3760 v.AuxInt = int8ToAuxInt(c + d)
3761 v.AddArg(x)
3762 return true
3763 }
3764
3765
3766
3767 for {
3768 c := auxIntToInt8(v.AuxInt)
3769 if v_0.Op != OpAMD64SHLQconst {
3770 break
3771 }
3772 d := auxIntToInt8(v_0.AuxInt)
3773 x := v_0.Args[0]
3774 if !(c > d) {
3775 break
3776 }
3777 v.reset(OpAMD64BTQconst)
3778 v.AuxInt = int8ToAuxInt(c - d)
3779 v.AddArg(x)
3780 return true
3781 }
3782
3783
3784 for {
3785 if auxIntToInt8(v.AuxInt) != 0 {
3786 break
3787 }
3788 s := v_0
3789 if s.Op != OpAMD64SHRQ {
3790 break
3791 }
3792 y := s.Args[1]
3793 x := s.Args[0]
3794 v.reset(OpAMD64BTQ)
3795 v.AddArg2(y, x)
3796 return true
3797 }
3798 return false
3799 }
3800 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
3801 v_0 := v.Args[0]
3802
3803
3804 for {
3805 c := auxIntToInt8(v.AuxInt)
3806 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
3807 break
3808 }
3809 x := v_0.Args[0]
3810 v.reset(OpAMD64BTRQconst)
3811 v.AuxInt = int8ToAuxInt(c)
3812 v.AddArg(x)
3813 return true
3814 }
3815
3816
3817 for {
3818 c := auxIntToInt8(v.AuxInt)
3819 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
3820 break
3821 }
3822 x := v_0.Args[0]
3823 v.reset(OpAMD64BTRQconst)
3824 v.AuxInt = int8ToAuxInt(c)
3825 v.AddArg(x)
3826 return true
3827 }
3828
3829
3830 for {
3831 c := auxIntToInt8(v.AuxInt)
3832 if v_0.Op != OpAMD64MOVQconst {
3833 break
3834 }
3835 d := auxIntToInt64(v_0.AuxInt)
3836 v.reset(OpAMD64MOVQconst)
3837 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
3838 return true
3839 }
3840 return false
3841 }
3842 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
3843 v_0 := v.Args[0]
3844
3845
3846 for {
3847 c := auxIntToInt8(v.AuxInt)
3848 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
3849 break
3850 }
3851 x := v_0.Args[0]
3852 v.reset(OpAMD64BTSQconst)
3853 v.AuxInt = int8ToAuxInt(c)
3854 v.AddArg(x)
3855 return true
3856 }
3857
3858
3859 for {
3860 c := auxIntToInt8(v.AuxInt)
3861 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
3862 break
3863 }
3864 x := v_0.Args[0]
3865 v.reset(OpAMD64BTSQconst)
3866 v.AuxInt = int8ToAuxInt(c)
3867 v.AddArg(x)
3868 return true
3869 }
3870
3871
3872 for {
3873 c := auxIntToInt8(v.AuxInt)
3874 if v_0.Op != OpAMD64MOVQconst {
3875 break
3876 }
3877 d := auxIntToInt64(v_0.AuxInt)
3878 v.reset(OpAMD64MOVQconst)
3879 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
3880 return true
3881 }
3882 return false
3883 }
3884 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
3885 v_2 := v.Args[2]
3886 v_1 := v.Args[1]
3887 v_0 := v.Args[0]
3888
3889
3890 for {
3891 x := v_0
3892 y := v_1
3893 if v_2.Op != OpAMD64InvertFlags {
3894 break
3895 }
3896 cond := v_2.Args[0]
3897 v.reset(OpAMD64CMOVLLS)
3898 v.AddArg3(x, y, cond)
3899 return true
3900 }
3901
3902
3903 for {
3904 x := v_1
3905 if v_2.Op != OpAMD64FlagEQ {
3906 break
3907 }
3908 v.copyOf(x)
3909 return true
3910 }
3911
3912
3913 for {
3914 x := v_1
3915 if v_2.Op != OpAMD64FlagGT_UGT {
3916 break
3917 }
3918 v.copyOf(x)
3919 return true
3920 }
3921
3922
3923 for {
3924 y := v_0
3925 if v_2.Op != OpAMD64FlagGT_ULT {
3926 break
3927 }
3928 v.copyOf(y)
3929 return true
3930 }
3931
3932
3933 for {
3934 y := v_0
3935 if v_2.Op != OpAMD64FlagLT_ULT {
3936 break
3937 }
3938 v.copyOf(y)
3939 return true
3940 }
3941
3942
3943 for {
3944 x := v_1
3945 if v_2.Op != OpAMD64FlagLT_UGT {
3946 break
3947 }
3948 v.copyOf(x)
3949 return true
3950 }
3951 return false
3952 }
3953 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
3954 v_2 := v.Args[2]
3955 v_1 := v.Args[1]
3956 v_0 := v.Args[0]
3957
3958
3959 for {
3960 x := v_0
3961 y := v_1
3962 if v_2.Op != OpAMD64InvertFlags {
3963 break
3964 }
3965 cond := v_2.Args[0]
3966 v.reset(OpAMD64CMOVLHI)
3967 v.AddArg3(x, y, cond)
3968 return true
3969 }
3970
3971
3972 for {
3973 y := v_0
3974 if v_2.Op != OpAMD64FlagEQ {
3975 break
3976 }
3977 v.copyOf(y)
3978 return true
3979 }
3980
3981
3982 for {
3983 y := v_0
3984 if v_2.Op != OpAMD64FlagGT_UGT {
3985 break
3986 }
3987 v.copyOf(y)
3988 return true
3989 }
3990
3991
3992 for {
3993 x := v_1
3994 if v_2.Op != OpAMD64FlagGT_ULT {
3995 break
3996 }
3997 v.copyOf(x)
3998 return true
3999 }
4000
4001
4002 for {
4003 x := v_1
4004 if v_2.Op != OpAMD64FlagLT_ULT {
4005 break
4006 }
4007 v.copyOf(x)
4008 return true
4009 }
4010
4011
4012 for {
4013 y := v_0
4014 if v_2.Op != OpAMD64FlagLT_UGT {
4015 break
4016 }
4017 v.copyOf(y)
4018 return true
4019 }
4020 return false
4021 }
4022 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4023 v_2 := v.Args[2]
4024 v_1 := v.Args[1]
4025 v_0 := v.Args[0]
4026 b := v.Block
4027
4028
4029 for {
4030 x := v_0
4031 y := v_1
4032 if v_2.Op != OpAMD64InvertFlags {
4033 break
4034 }
4035 cond := v_2.Args[0]
4036 v.reset(OpAMD64CMOVLEQ)
4037 v.AddArg3(x, y, cond)
4038 return true
4039 }
4040
4041
4042 for {
4043 x := v_1
4044 if v_2.Op != OpAMD64FlagEQ {
4045 break
4046 }
4047 v.copyOf(x)
4048 return true
4049 }
4050
4051
4052 for {
4053 y := v_0
4054 if v_2.Op != OpAMD64FlagGT_UGT {
4055 break
4056 }
4057 v.copyOf(y)
4058 return true
4059 }
4060
4061
4062 for {
4063 y := v_0
4064 if v_2.Op != OpAMD64FlagGT_ULT {
4065 break
4066 }
4067 v.copyOf(y)
4068 return true
4069 }
4070
4071
4072 for {
4073 y := v_0
4074 if v_2.Op != OpAMD64FlagLT_ULT {
4075 break
4076 }
4077 v.copyOf(y)
4078 return true
4079 }
4080
4081
4082 for {
4083 y := v_0
4084 if v_2.Op != OpAMD64FlagLT_UGT {
4085 break
4086 }
4087 v.copyOf(y)
4088 return true
4089 }
4090
4091
4092 for {
4093 x := v_0
4094 y := v_1
4095 if v_2.Op != OpAMD64TESTQ {
4096 break
4097 }
4098 _ = v_2.Args[1]
4099 v_2_0 := v_2.Args[0]
4100 v_2_1 := v_2.Args[1]
4101 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4102 s := v_2_0
4103 if s.Op != OpSelect0 {
4104 continue
4105 }
4106 blsr := s.Args[0]
4107 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4108 continue
4109 }
4110 v.reset(OpAMD64CMOVLEQ)
4111 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4112 v0.AddArg(blsr)
4113 v.AddArg3(x, y, v0)
4114 return true
4115 }
4116 break
4117 }
4118
4119
4120 for {
4121 x := v_0
4122 y := v_1
4123 if v_2.Op != OpAMD64TESTL {
4124 break
4125 }
4126 _ = v_2.Args[1]
4127 v_2_0 := v_2.Args[0]
4128 v_2_1 := v_2.Args[1]
4129 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4130 s := v_2_0
4131 if s.Op != OpSelect0 {
4132 continue
4133 }
4134 blsr := s.Args[0]
4135 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4136 continue
4137 }
4138 v.reset(OpAMD64CMOVLEQ)
4139 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4140 v0.AddArg(blsr)
4141 v.AddArg3(x, y, v0)
4142 return true
4143 }
4144 break
4145 }
4146 return false
4147 }
4148 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4149 v_2 := v.Args[2]
4150 v_1 := v.Args[1]
4151 v_0 := v.Args[0]
4152
4153
4154 for {
4155 x := v_0
4156 y := v_1
4157 if v_2.Op != OpAMD64InvertFlags {
4158 break
4159 }
4160 cond := v_2.Args[0]
4161 v.reset(OpAMD64CMOVLLE)
4162 v.AddArg3(x, y, cond)
4163 return true
4164 }
4165
4166
4167 for {
4168 x := v_1
4169 if v_2.Op != OpAMD64FlagEQ {
4170 break
4171 }
4172 v.copyOf(x)
4173 return true
4174 }
4175
4176
4177 for {
4178 x := v_1
4179 if v_2.Op != OpAMD64FlagGT_UGT {
4180 break
4181 }
4182 v.copyOf(x)
4183 return true
4184 }
4185
4186
4187 for {
4188 x := v_1
4189 if v_2.Op != OpAMD64FlagGT_ULT {
4190 break
4191 }
4192 v.copyOf(x)
4193 return true
4194 }
4195
4196
4197 for {
4198 y := v_0
4199 if v_2.Op != OpAMD64FlagLT_ULT {
4200 break
4201 }
4202 v.copyOf(y)
4203 return true
4204 }
4205
4206
4207 for {
4208 y := v_0
4209 if v_2.Op != OpAMD64FlagLT_UGT {
4210 break
4211 }
4212 v.copyOf(y)
4213 return true
4214 }
4215 return false
4216 }
4217 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4218 v_2 := v.Args[2]
4219 v_1 := v.Args[1]
4220 v_0 := v.Args[0]
4221
4222
4223 for {
4224 x := v_0
4225 y := v_1
4226 if v_2.Op != OpAMD64InvertFlags {
4227 break
4228 }
4229 cond := v_2.Args[0]
4230 v.reset(OpAMD64CMOVLLT)
4231 v.AddArg3(x, y, cond)
4232 return true
4233 }
4234
4235
4236 for {
4237 y := v_0
4238 if v_2.Op != OpAMD64FlagEQ {
4239 break
4240 }
4241 v.copyOf(y)
4242 return true
4243 }
4244
4245
4246 for {
4247 x := v_1
4248 if v_2.Op != OpAMD64FlagGT_UGT {
4249 break
4250 }
4251 v.copyOf(x)
4252 return true
4253 }
4254
4255
4256 for {
4257 x := v_1
4258 if v_2.Op != OpAMD64FlagGT_ULT {
4259 break
4260 }
4261 v.copyOf(x)
4262 return true
4263 }
4264
4265
4266 for {
4267 y := v_0
4268 if v_2.Op != OpAMD64FlagLT_ULT {
4269 break
4270 }
4271 v.copyOf(y)
4272 return true
4273 }
4274
4275
4276 for {
4277 y := v_0
4278 if v_2.Op != OpAMD64FlagLT_UGT {
4279 break
4280 }
4281 v.copyOf(y)
4282 return true
4283 }
4284 return false
4285 }
4286 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4287 v_2 := v.Args[2]
4288 v_1 := v.Args[1]
4289 v_0 := v.Args[0]
4290
4291
4292 for {
4293 x := v_0
4294 y := v_1
4295 if v_2.Op != OpAMD64InvertFlags {
4296 break
4297 }
4298 cond := v_2.Args[0]
4299 v.reset(OpAMD64CMOVLCS)
4300 v.AddArg3(x, y, cond)
4301 return true
4302 }
4303
4304
4305 for {
4306 y := v_0
4307 if v_2.Op != OpAMD64FlagEQ {
4308 break
4309 }
4310 v.copyOf(y)
4311 return true
4312 }
4313
4314
4315 for {
4316 x := v_1
4317 if v_2.Op != OpAMD64FlagGT_UGT {
4318 break
4319 }
4320 v.copyOf(x)
4321 return true
4322 }
4323
4324
4325 for {
4326 y := v_0
4327 if v_2.Op != OpAMD64FlagGT_ULT {
4328 break
4329 }
4330 v.copyOf(y)
4331 return true
4332 }
4333
4334
4335 for {
4336 y := v_0
4337 if v_2.Op != OpAMD64FlagLT_ULT {
4338 break
4339 }
4340 v.copyOf(y)
4341 return true
4342 }
4343
4344
4345 for {
4346 x := v_1
4347 if v_2.Op != OpAMD64FlagLT_UGT {
4348 break
4349 }
4350 v.copyOf(x)
4351 return true
4352 }
4353 return false
4354 }
4355 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
4356 v_2 := v.Args[2]
4357 v_1 := v.Args[1]
4358 v_0 := v.Args[0]
4359
4360
4361 for {
4362 x := v_0
4363 y := v_1
4364 if v_2.Op != OpAMD64InvertFlags {
4365 break
4366 }
4367 cond := v_2.Args[0]
4368 v.reset(OpAMD64CMOVLGE)
4369 v.AddArg3(x, y, cond)
4370 return true
4371 }
4372
4373
4374 for {
4375 x := v_1
4376 if v_2.Op != OpAMD64FlagEQ {
4377 break
4378 }
4379 v.copyOf(x)
4380 return true
4381 }
4382
4383
4384 for {
4385 y := v_0
4386 if v_2.Op != OpAMD64FlagGT_UGT {
4387 break
4388 }
4389 v.copyOf(y)
4390 return true
4391 }
4392
4393
4394 for {
4395 y := v_0
4396 if v_2.Op != OpAMD64FlagGT_ULT {
4397 break
4398 }
4399 v.copyOf(y)
4400 return true
4401 }
4402
4403
4404 for {
4405 x := v_1
4406 if v_2.Op != OpAMD64FlagLT_ULT {
4407 break
4408 }
4409 v.copyOf(x)
4410 return true
4411 }
4412
4413
4414 for {
4415 x := v_1
4416 if v_2.Op != OpAMD64FlagLT_UGT {
4417 break
4418 }
4419 v.copyOf(x)
4420 return true
4421 }
4422 return false
4423 }
4424 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
4425 v_2 := v.Args[2]
4426 v_1 := v.Args[1]
4427 v_0 := v.Args[0]
4428
4429
4430 for {
4431 x := v_0
4432 y := v_1
4433 if v_2.Op != OpAMD64InvertFlags {
4434 break
4435 }
4436 cond := v_2.Args[0]
4437 v.reset(OpAMD64CMOVLCC)
4438 v.AddArg3(x, y, cond)
4439 return true
4440 }
4441
4442
4443 for {
4444 x := v_1
4445 if v_2.Op != OpAMD64FlagEQ {
4446 break
4447 }
4448 v.copyOf(x)
4449 return true
4450 }
4451
4452
4453 for {
4454 y := v_0
4455 if v_2.Op != OpAMD64FlagGT_UGT {
4456 break
4457 }
4458 v.copyOf(y)
4459 return true
4460 }
4461
4462
4463 for {
4464 x := v_1
4465 if v_2.Op != OpAMD64FlagGT_ULT {
4466 break
4467 }
4468 v.copyOf(x)
4469 return true
4470 }
4471
4472
4473 for {
4474 x := v_1
4475 if v_2.Op != OpAMD64FlagLT_ULT {
4476 break
4477 }
4478 v.copyOf(x)
4479 return true
4480 }
4481
4482
4483 for {
4484 y := v_0
4485 if v_2.Op != OpAMD64FlagLT_UGT {
4486 break
4487 }
4488 v.copyOf(y)
4489 return true
4490 }
4491 return false
4492 }
4493 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
4494 v_2 := v.Args[2]
4495 v_1 := v.Args[1]
4496 v_0 := v.Args[0]
4497
4498
4499 for {
4500 x := v_0
4501 y := v_1
4502 if v_2.Op != OpAMD64InvertFlags {
4503 break
4504 }
4505 cond := v_2.Args[0]
4506 v.reset(OpAMD64CMOVLGT)
4507 v.AddArg3(x, y, cond)
4508 return true
4509 }
4510
4511
4512 for {
4513 y := v_0
4514 if v_2.Op != OpAMD64FlagEQ {
4515 break
4516 }
4517 v.copyOf(y)
4518 return true
4519 }
4520
4521
4522 for {
4523 y := v_0
4524 if v_2.Op != OpAMD64FlagGT_UGT {
4525 break
4526 }
4527 v.copyOf(y)
4528 return true
4529 }
4530
4531
4532 for {
4533 y := v_0
4534 if v_2.Op != OpAMD64FlagGT_ULT {
4535 break
4536 }
4537 v.copyOf(y)
4538 return true
4539 }
4540
4541
4542 for {
4543 x := v_1
4544 if v_2.Op != OpAMD64FlagLT_ULT {
4545 break
4546 }
4547 v.copyOf(x)
4548 return true
4549 }
4550
4551
4552 for {
4553 x := v_1
4554 if v_2.Op != OpAMD64FlagLT_UGT {
4555 break
4556 }
4557 v.copyOf(x)
4558 return true
4559 }
4560 return false
4561 }
4562 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
4563 v_2 := v.Args[2]
4564 v_1 := v.Args[1]
4565 v_0 := v.Args[0]
4566 b := v.Block
4567
4568
4569 for {
4570 x := v_0
4571 y := v_1
4572 if v_2.Op != OpAMD64InvertFlags {
4573 break
4574 }
4575 cond := v_2.Args[0]
4576 v.reset(OpAMD64CMOVLNE)
4577 v.AddArg3(x, y, cond)
4578 return true
4579 }
4580
4581
4582 for {
4583 y := v_0
4584 if v_2.Op != OpAMD64FlagEQ {
4585 break
4586 }
4587 v.copyOf(y)
4588 return true
4589 }
4590
4591
4592 for {
4593 x := v_1
4594 if v_2.Op != OpAMD64FlagGT_UGT {
4595 break
4596 }
4597 v.copyOf(x)
4598 return true
4599 }
4600
4601
4602 for {
4603 x := v_1
4604 if v_2.Op != OpAMD64FlagGT_ULT {
4605 break
4606 }
4607 v.copyOf(x)
4608 return true
4609 }
4610
4611
4612 for {
4613 x := v_1
4614 if v_2.Op != OpAMD64FlagLT_ULT {
4615 break
4616 }
4617 v.copyOf(x)
4618 return true
4619 }
4620
4621
4622 for {
4623 x := v_1
4624 if v_2.Op != OpAMD64FlagLT_UGT {
4625 break
4626 }
4627 v.copyOf(x)
4628 return true
4629 }
4630
4631
4632 for {
4633 x := v_0
4634 y := v_1
4635 if v_2.Op != OpAMD64TESTQ {
4636 break
4637 }
4638 _ = v_2.Args[1]
4639 v_2_0 := v_2.Args[0]
4640 v_2_1 := v_2.Args[1]
4641 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4642 s := v_2_0
4643 if s.Op != OpSelect0 {
4644 continue
4645 }
4646 blsr := s.Args[0]
4647 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4648 continue
4649 }
4650 v.reset(OpAMD64CMOVLNE)
4651 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4652 v0.AddArg(blsr)
4653 v.AddArg3(x, y, v0)
4654 return true
4655 }
4656 break
4657 }
4658
4659
4660 for {
4661 x := v_0
4662 y := v_1
4663 if v_2.Op != OpAMD64TESTL {
4664 break
4665 }
4666 _ = v_2.Args[1]
4667 v_2_0 := v_2.Args[0]
4668 v_2_1 := v_2.Args[1]
4669 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4670 s := v_2_0
4671 if s.Op != OpSelect0 {
4672 continue
4673 }
4674 blsr := s.Args[0]
4675 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4676 continue
4677 }
4678 v.reset(OpAMD64CMOVLNE)
4679 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4680 v0.AddArg(blsr)
4681 v.AddArg3(x, y, v0)
4682 return true
4683 }
4684 break
4685 }
4686 return false
4687 }
4688 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
4689 v_2 := v.Args[2]
4690 v_1 := v.Args[1]
4691 v_0 := v.Args[0]
4692
4693
4694 for {
4695 x := v_0
4696 y := v_1
4697 if v_2.Op != OpAMD64InvertFlags {
4698 break
4699 }
4700 cond := v_2.Args[0]
4701 v.reset(OpAMD64CMOVQLS)
4702 v.AddArg3(x, y, cond)
4703 return true
4704 }
4705
4706
4707 for {
4708 x := v_1
4709 if v_2.Op != OpAMD64FlagEQ {
4710 break
4711 }
4712 v.copyOf(x)
4713 return true
4714 }
4715
4716
4717 for {
4718 x := v_1
4719 if v_2.Op != OpAMD64FlagGT_UGT {
4720 break
4721 }
4722 v.copyOf(x)
4723 return true
4724 }
4725
4726
4727 for {
4728 y := v_0
4729 if v_2.Op != OpAMD64FlagGT_ULT {
4730 break
4731 }
4732 v.copyOf(y)
4733 return true
4734 }
4735
4736
4737 for {
4738 y := v_0
4739 if v_2.Op != OpAMD64FlagLT_ULT {
4740 break
4741 }
4742 v.copyOf(y)
4743 return true
4744 }
4745
4746
4747 for {
4748 x := v_1
4749 if v_2.Op != OpAMD64FlagLT_UGT {
4750 break
4751 }
4752 v.copyOf(x)
4753 return true
4754 }
4755 return false
4756 }
4757 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
4758 v_2 := v.Args[2]
4759 v_1 := v.Args[1]
4760 v_0 := v.Args[0]
4761
4762
4763 for {
4764 x := v_0
4765 y := v_1
4766 if v_2.Op != OpAMD64InvertFlags {
4767 break
4768 }
4769 cond := v_2.Args[0]
4770 v.reset(OpAMD64CMOVQHI)
4771 v.AddArg3(x, y, cond)
4772 return true
4773 }
4774
4775
4776 for {
4777 y := v_0
4778 if v_2.Op != OpAMD64FlagEQ {
4779 break
4780 }
4781 v.copyOf(y)
4782 return true
4783 }
4784
4785
4786 for {
4787 y := v_0
4788 if v_2.Op != OpAMD64FlagGT_UGT {
4789 break
4790 }
4791 v.copyOf(y)
4792 return true
4793 }
4794
4795
4796 for {
4797 x := v_1
4798 if v_2.Op != OpAMD64FlagGT_ULT {
4799 break
4800 }
4801 v.copyOf(x)
4802 return true
4803 }
4804
4805
4806 for {
4807 x := v_1
4808 if v_2.Op != OpAMD64FlagLT_ULT {
4809 break
4810 }
4811 v.copyOf(x)
4812 return true
4813 }
4814
4815
4816 for {
4817 y := v_0
4818 if v_2.Op != OpAMD64FlagLT_UGT {
4819 break
4820 }
4821 v.copyOf(y)
4822 return true
4823 }
4824 return false
4825 }
4826 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
4827 v_2 := v.Args[2]
4828 v_1 := v.Args[1]
4829 v_0 := v.Args[0]
4830 b := v.Block
4831
4832
4833 for {
4834 x := v_0
4835 y := v_1
4836 if v_2.Op != OpAMD64InvertFlags {
4837 break
4838 }
4839 cond := v_2.Args[0]
4840 v.reset(OpAMD64CMOVQEQ)
4841 v.AddArg3(x, y, cond)
4842 return true
4843 }
4844
4845
4846 for {
4847 x := v_1
4848 if v_2.Op != OpAMD64FlagEQ {
4849 break
4850 }
4851 v.copyOf(x)
4852 return true
4853 }
4854
4855
4856 for {
4857 y := v_0
4858 if v_2.Op != OpAMD64FlagGT_UGT {
4859 break
4860 }
4861 v.copyOf(y)
4862 return true
4863 }
4864
4865
4866 for {
4867 y := v_0
4868 if v_2.Op != OpAMD64FlagGT_ULT {
4869 break
4870 }
4871 v.copyOf(y)
4872 return true
4873 }
4874
4875
4876 for {
4877 y := v_0
4878 if v_2.Op != OpAMD64FlagLT_ULT {
4879 break
4880 }
4881 v.copyOf(y)
4882 return true
4883 }
4884
4885
4886 for {
4887 y := v_0
4888 if v_2.Op != OpAMD64FlagLT_UGT {
4889 break
4890 }
4891 v.copyOf(y)
4892 return true
4893 }
4894
4895
4896
4897 for {
4898 x := v_0
4899 if v_2.Op != OpSelect1 {
4900 break
4901 }
4902 v_2_0 := v_2.Args[0]
4903 if v_2_0.Op != OpAMD64BSFQ {
4904 break
4905 }
4906 v_2_0_0 := v_2_0.Args[0]
4907 if v_2_0_0.Op != OpAMD64ORQconst {
4908 break
4909 }
4910 c := auxIntToInt32(v_2_0_0.AuxInt)
4911 if !(c != 0) {
4912 break
4913 }
4914 v.copyOf(x)
4915 return true
4916 }
4917
4918
4919
4920 for {
4921 x := v_0
4922 if v_2.Op != OpSelect1 {
4923 break
4924 }
4925 v_2_0 := v_2.Args[0]
4926 if v_2_0.Op != OpAMD64BSRQ {
4927 break
4928 }
4929 v_2_0_0 := v_2_0.Args[0]
4930 if v_2_0_0.Op != OpAMD64ORQconst {
4931 break
4932 }
4933 c := auxIntToInt32(v_2_0_0.AuxInt)
4934 if !(c != 0) {
4935 break
4936 }
4937 v.copyOf(x)
4938 return true
4939 }
4940
4941
4942 for {
4943 x := v_0
4944 y := v_1
4945 if v_2.Op != OpAMD64TESTQ {
4946 break
4947 }
4948 _ = v_2.Args[1]
4949 v_2_0 := v_2.Args[0]
4950 v_2_1 := v_2.Args[1]
4951 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4952 s := v_2_0
4953 if s.Op != OpSelect0 {
4954 continue
4955 }
4956 blsr := s.Args[0]
4957 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4958 continue
4959 }
4960 v.reset(OpAMD64CMOVQEQ)
4961 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4962 v0.AddArg(blsr)
4963 v.AddArg3(x, y, v0)
4964 return true
4965 }
4966 break
4967 }
4968
4969
4970 for {
4971 x := v_0
4972 y := v_1
4973 if v_2.Op != OpAMD64TESTL {
4974 break
4975 }
4976 _ = v_2.Args[1]
4977 v_2_0 := v_2.Args[0]
4978 v_2_1 := v_2.Args[1]
4979 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4980 s := v_2_0
4981 if s.Op != OpSelect0 {
4982 continue
4983 }
4984 blsr := s.Args[0]
4985 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4986 continue
4987 }
4988 v.reset(OpAMD64CMOVQEQ)
4989 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4990 v0.AddArg(blsr)
4991 v.AddArg3(x, y, v0)
4992 return true
4993 }
4994 break
4995 }
4996 return false
4997 }
4998 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
4999 v_2 := v.Args[2]
5000 v_1 := v.Args[1]
5001 v_0 := v.Args[0]
5002
5003
5004 for {
5005 x := v_0
5006 y := v_1
5007 if v_2.Op != OpAMD64InvertFlags {
5008 break
5009 }
5010 cond := v_2.Args[0]
5011 v.reset(OpAMD64CMOVQLE)
5012 v.AddArg3(x, y, cond)
5013 return true
5014 }
5015
5016
5017 for {
5018 x := v_1
5019 if v_2.Op != OpAMD64FlagEQ {
5020 break
5021 }
5022 v.copyOf(x)
5023 return true
5024 }
5025
5026
5027 for {
5028 x := v_1
5029 if v_2.Op != OpAMD64FlagGT_UGT {
5030 break
5031 }
5032 v.copyOf(x)
5033 return true
5034 }
5035
5036
5037 for {
5038 x := v_1
5039 if v_2.Op != OpAMD64FlagGT_ULT {
5040 break
5041 }
5042 v.copyOf(x)
5043 return true
5044 }
5045
5046
5047 for {
5048 y := v_0
5049 if v_2.Op != OpAMD64FlagLT_ULT {
5050 break
5051 }
5052 v.copyOf(y)
5053 return true
5054 }
5055
5056
5057 for {
5058 y := v_0
5059 if v_2.Op != OpAMD64FlagLT_UGT {
5060 break
5061 }
5062 v.copyOf(y)
5063 return true
5064 }
5065 return false
5066 }
5067 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5068 v_2 := v.Args[2]
5069 v_1 := v.Args[1]
5070 v_0 := v.Args[0]
5071
5072
5073 for {
5074 x := v_0
5075 y := v_1
5076 if v_2.Op != OpAMD64InvertFlags {
5077 break
5078 }
5079 cond := v_2.Args[0]
5080 v.reset(OpAMD64CMOVQLT)
5081 v.AddArg3(x, y, cond)
5082 return true
5083 }
5084
5085
5086 for {
5087 y := v_0
5088 if v_2.Op != OpAMD64FlagEQ {
5089 break
5090 }
5091 v.copyOf(y)
5092 return true
5093 }
5094
5095
5096 for {
5097 x := v_1
5098 if v_2.Op != OpAMD64FlagGT_UGT {
5099 break
5100 }
5101 v.copyOf(x)
5102 return true
5103 }
5104
5105
5106 for {
5107 x := v_1
5108 if v_2.Op != OpAMD64FlagGT_ULT {
5109 break
5110 }
5111 v.copyOf(x)
5112 return true
5113 }
5114
5115
5116 for {
5117 y := v_0
5118 if v_2.Op != OpAMD64FlagLT_ULT {
5119 break
5120 }
5121 v.copyOf(y)
5122 return true
5123 }
5124
5125
5126 for {
5127 y := v_0
5128 if v_2.Op != OpAMD64FlagLT_UGT {
5129 break
5130 }
5131 v.copyOf(y)
5132 return true
5133 }
5134 return false
5135 }
5136 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5137 v_2 := v.Args[2]
5138 v_1 := v.Args[1]
5139 v_0 := v.Args[0]
5140
5141
5142 for {
5143 x := v_0
5144 y := v_1
5145 if v_2.Op != OpAMD64InvertFlags {
5146 break
5147 }
5148 cond := v_2.Args[0]
5149 v.reset(OpAMD64CMOVQCS)
5150 v.AddArg3(x, y, cond)
5151 return true
5152 }
5153
5154
5155 for {
5156 y := v_0
5157 if v_2.Op != OpAMD64FlagEQ {
5158 break
5159 }
5160 v.copyOf(y)
5161 return true
5162 }
5163
5164
5165 for {
5166 x := v_1
5167 if v_2.Op != OpAMD64FlagGT_UGT {
5168 break
5169 }
5170 v.copyOf(x)
5171 return true
5172 }
5173
5174
5175 for {
5176 y := v_0
5177 if v_2.Op != OpAMD64FlagGT_ULT {
5178 break
5179 }
5180 v.copyOf(y)
5181 return true
5182 }
5183
5184
5185 for {
5186 y := v_0
5187 if v_2.Op != OpAMD64FlagLT_ULT {
5188 break
5189 }
5190 v.copyOf(y)
5191 return true
5192 }
5193
5194
5195 for {
5196 x := v_1
5197 if v_2.Op != OpAMD64FlagLT_UGT {
5198 break
5199 }
5200 v.copyOf(x)
5201 return true
5202 }
5203 return false
5204 }
5205 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5206 v_2 := v.Args[2]
5207 v_1 := v.Args[1]
5208 v_0 := v.Args[0]
5209
5210
5211 for {
5212 x := v_0
5213 y := v_1
5214 if v_2.Op != OpAMD64InvertFlags {
5215 break
5216 }
5217 cond := v_2.Args[0]
5218 v.reset(OpAMD64CMOVQGE)
5219 v.AddArg3(x, y, cond)
5220 return true
5221 }
5222
5223
5224 for {
5225 x := v_1
5226 if v_2.Op != OpAMD64FlagEQ {
5227 break
5228 }
5229 v.copyOf(x)
5230 return true
5231 }
5232
5233
5234 for {
5235 y := v_0
5236 if v_2.Op != OpAMD64FlagGT_UGT {
5237 break
5238 }
5239 v.copyOf(y)
5240 return true
5241 }
5242
5243
5244 for {
5245 y := v_0
5246 if v_2.Op != OpAMD64FlagGT_ULT {
5247 break
5248 }
5249 v.copyOf(y)
5250 return true
5251 }
5252
5253
5254 for {
5255 x := v_1
5256 if v_2.Op != OpAMD64FlagLT_ULT {
5257 break
5258 }
5259 v.copyOf(x)
5260 return true
5261 }
5262
5263
5264 for {
5265 x := v_1
5266 if v_2.Op != OpAMD64FlagLT_UGT {
5267 break
5268 }
5269 v.copyOf(x)
5270 return true
5271 }
5272 return false
5273 }
5274 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5275 v_2 := v.Args[2]
5276 v_1 := v.Args[1]
5277 v_0 := v.Args[0]
5278
5279
5280 for {
5281 x := v_0
5282 y := v_1
5283 if v_2.Op != OpAMD64InvertFlags {
5284 break
5285 }
5286 cond := v_2.Args[0]
5287 v.reset(OpAMD64CMOVQCC)
5288 v.AddArg3(x, y, cond)
5289 return true
5290 }
5291
5292
5293 for {
5294 x := v_1
5295 if v_2.Op != OpAMD64FlagEQ {
5296 break
5297 }
5298 v.copyOf(x)
5299 return true
5300 }
5301
5302
5303 for {
5304 y := v_0
5305 if v_2.Op != OpAMD64FlagGT_UGT {
5306 break
5307 }
5308 v.copyOf(y)
5309 return true
5310 }
5311
5312
5313 for {
5314 x := v_1
5315 if v_2.Op != OpAMD64FlagGT_ULT {
5316 break
5317 }
5318 v.copyOf(x)
5319 return true
5320 }
5321
5322
5323 for {
5324 x := v_1
5325 if v_2.Op != OpAMD64FlagLT_ULT {
5326 break
5327 }
5328 v.copyOf(x)
5329 return true
5330 }
5331
5332
5333 for {
5334 y := v_0
5335 if v_2.Op != OpAMD64FlagLT_UGT {
5336 break
5337 }
5338 v.copyOf(y)
5339 return true
5340 }
5341 return false
5342 }
5343 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5344 v_2 := v.Args[2]
5345 v_1 := v.Args[1]
5346 v_0 := v.Args[0]
5347
5348
5349 for {
5350 x := v_0
5351 y := v_1
5352 if v_2.Op != OpAMD64InvertFlags {
5353 break
5354 }
5355 cond := v_2.Args[0]
5356 v.reset(OpAMD64CMOVQGT)
5357 v.AddArg3(x, y, cond)
5358 return true
5359 }
5360
5361
5362 for {
5363 y := v_0
5364 if v_2.Op != OpAMD64FlagEQ {
5365 break
5366 }
5367 v.copyOf(y)
5368 return true
5369 }
5370
5371
5372 for {
5373 y := v_0
5374 if v_2.Op != OpAMD64FlagGT_UGT {
5375 break
5376 }
5377 v.copyOf(y)
5378 return true
5379 }
5380
5381
5382 for {
5383 y := v_0
5384 if v_2.Op != OpAMD64FlagGT_ULT {
5385 break
5386 }
5387 v.copyOf(y)
5388 return true
5389 }
5390
5391
5392 for {
5393 x := v_1
5394 if v_2.Op != OpAMD64FlagLT_ULT {
5395 break
5396 }
5397 v.copyOf(x)
5398 return true
5399 }
5400
5401
5402 for {
5403 x := v_1
5404 if v_2.Op != OpAMD64FlagLT_UGT {
5405 break
5406 }
5407 v.copyOf(x)
5408 return true
5409 }
5410 return false
5411 }
5412 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
5413 v_2 := v.Args[2]
5414 v_1 := v.Args[1]
5415 v_0 := v.Args[0]
5416 b := v.Block
5417
5418
5419 for {
5420 x := v_0
5421 y := v_1
5422 if v_2.Op != OpAMD64InvertFlags {
5423 break
5424 }
5425 cond := v_2.Args[0]
5426 v.reset(OpAMD64CMOVQNE)
5427 v.AddArg3(x, y, cond)
5428 return true
5429 }
5430
5431
5432 for {
5433 y := v_0
5434 if v_2.Op != OpAMD64FlagEQ {
5435 break
5436 }
5437 v.copyOf(y)
5438 return true
5439 }
5440
5441
5442 for {
5443 x := v_1
5444 if v_2.Op != OpAMD64FlagGT_UGT {
5445 break
5446 }
5447 v.copyOf(x)
5448 return true
5449 }
5450
5451
5452 for {
5453 x := v_1
5454 if v_2.Op != OpAMD64FlagGT_ULT {
5455 break
5456 }
5457 v.copyOf(x)
5458 return true
5459 }
5460
5461
5462 for {
5463 x := v_1
5464 if v_2.Op != OpAMD64FlagLT_ULT {
5465 break
5466 }
5467 v.copyOf(x)
5468 return true
5469 }
5470
5471
5472 for {
5473 x := v_1
5474 if v_2.Op != OpAMD64FlagLT_UGT {
5475 break
5476 }
5477 v.copyOf(x)
5478 return true
5479 }
5480
5481
5482 for {
5483 x := v_0
5484 y := v_1
5485 if v_2.Op != OpAMD64TESTQ {
5486 break
5487 }
5488 _ = v_2.Args[1]
5489 v_2_0 := v_2.Args[0]
5490 v_2_1 := v_2.Args[1]
5491 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5492 s := v_2_0
5493 if s.Op != OpSelect0 {
5494 continue
5495 }
5496 blsr := s.Args[0]
5497 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
5498 continue
5499 }
5500 v.reset(OpAMD64CMOVQNE)
5501 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5502 v0.AddArg(blsr)
5503 v.AddArg3(x, y, v0)
5504 return true
5505 }
5506 break
5507 }
5508
5509
5510 for {
5511 x := v_0
5512 y := v_1
5513 if v_2.Op != OpAMD64TESTL {
5514 break
5515 }
5516 _ = v_2.Args[1]
5517 v_2_0 := v_2.Args[0]
5518 v_2_1 := v_2.Args[1]
5519 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5520 s := v_2_0
5521 if s.Op != OpSelect0 {
5522 continue
5523 }
5524 blsr := s.Args[0]
5525 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
5526 continue
5527 }
5528 v.reset(OpAMD64CMOVQNE)
5529 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5530 v0.AddArg(blsr)
5531 v.AddArg3(x, y, v0)
5532 return true
5533 }
5534 break
5535 }
5536 return false
5537 }
5538 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
5539 v_2 := v.Args[2]
5540 v_1 := v.Args[1]
5541 v_0 := v.Args[0]
5542
5543
5544 for {
5545 x := v_0
5546 y := v_1
5547 if v_2.Op != OpAMD64InvertFlags {
5548 break
5549 }
5550 cond := v_2.Args[0]
5551 v.reset(OpAMD64CMOVWLS)
5552 v.AddArg3(x, y, cond)
5553 return true
5554 }
5555
5556
5557 for {
5558 x := v_1
5559 if v_2.Op != OpAMD64FlagEQ {
5560 break
5561 }
5562 v.copyOf(x)
5563 return true
5564 }
5565
5566
5567 for {
5568 x := v_1
5569 if v_2.Op != OpAMD64FlagGT_UGT {
5570 break
5571 }
5572 v.copyOf(x)
5573 return true
5574 }
5575
5576
5577 for {
5578 y := v_0
5579 if v_2.Op != OpAMD64FlagGT_ULT {
5580 break
5581 }
5582 v.copyOf(y)
5583 return true
5584 }
5585
5586
5587 for {
5588 y := v_0
5589 if v_2.Op != OpAMD64FlagLT_ULT {
5590 break
5591 }
5592 v.copyOf(y)
5593 return true
5594 }
5595
5596
5597 for {
5598 x := v_1
5599 if v_2.Op != OpAMD64FlagLT_UGT {
5600 break
5601 }
5602 v.copyOf(x)
5603 return true
5604 }
5605 return false
5606 }
5607 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
5608 v_2 := v.Args[2]
5609 v_1 := v.Args[1]
5610 v_0 := v.Args[0]
5611
5612
5613 for {
5614 x := v_0
5615 y := v_1
5616 if v_2.Op != OpAMD64InvertFlags {
5617 break
5618 }
5619 cond := v_2.Args[0]
5620 v.reset(OpAMD64CMOVWHI)
5621 v.AddArg3(x, y, cond)
5622 return true
5623 }
5624
5625
5626 for {
5627 y := v_0
5628 if v_2.Op != OpAMD64FlagEQ {
5629 break
5630 }
5631 v.copyOf(y)
5632 return true
5633 }
5634
5635
5636 for {
5637 y := v_0
5638 if v_2.Op != OpAMD64FlagGT_UGT {
5639 break
5640 }
5641 v.copyOf(y)
5642 return true
5643 }
5644
5645
5646 for {
5647 x := v_1
5648 if v_2.Op != OpAMD64FlagGT_ULT {
5649 break
5650 }
5651 v.copyOf(x)
5652 return true
5653 }
5654
5655
5656 for {
5657 x := v_1
5658 if v_2.Op != OpAMD64FlagLT_ULT {
5659 break
5660 }
5661 v.copyOf(x)
5662 return true
5663 }
5664
5665
5666 for {
5667 y := v_0
5668 if v_2.Op != OpAMD64FlagLT_UGT {
5669 break
5670 }
5671 v.copyOf(y)
5672 return true
5673 }
5674 return false
5675 }
5676 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
5677 v_2 := v.Args[2]
5678 v_1 := v.Args[1]
5679 v_0 := v.Args[0]
5680
5681
5682 for {
5683 x := v_0
5684 y := v_1
5685 if v_2.Op != OpAMD64InvertFlags {
5686 break
5687 }
5688 cond := v_2.Args[0]
5689 v.reset(OpAMD64CMOVWEQ)
5690 v.AddArg3(x, y, cond)
5691 return true
5692 }
5693
5694
5695 for {
5696 x := v_1
5697 if v_2.Op != OpAMD64FlagEQ {
5698 break
5699 }
5700 v.copyOf(x)
5701 return true
5702 }
5703
5704
5705 for {
5706 y := v_0
5707 if v_2.Op != OpAMD64FlagGT_UGT {
5708 break
5709 }
5710 v.copyOf(y)
5711 return true
5712 }
5713
5714
5715 for {
5716 y := v_0
5717 if v_2.Op != OpAMD64FlagGT_ULT {
5718 break
5719 }
5720 v.copyOf(y)
5721 return true
5722 }
5723
5724
5725 for {
5726 y := v_0
5727 if v_2.Op != OpAMD64FlagLT_ULT {
5728 break
5729 }
5730 v.copyOf(y)
5731 return true
5732 }
5733
5734
5735 for {
5736 y := v_0
5737 if v_2.Op != OpAMD64FlagLT_UGT {
5738 break
5739 }
5740 v.copyOf(y)
5741 return true
5742 }
5743 return false
5744 }
5745 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
5746 v_2 := v.Args[2]
5747 v_1 := v.Args[1]
5748 v_0 := v.Args[0]
5749
5750
5751 for {
5752 x := v_0
5753 y := v_1
5754 if v_2.Op != OpAMD64InvertFlags {
5755 break
5756 }
5757 cond := v_2.Args[0]
5758 v.reset(OpAMD64CMOVWLE)
5759 v.AddArg3(x, y, cond)
5760 return true
5761 }
5762
5763
5764 for {
5765 x := v_1
5766 if v_2.Op != OpAMD64FlagEQ {
5767 break
5768 }
5769 v.copyOf(x)
5770 return true
5771 }
5772
5773
5774 for {
5775 x := v_1
5776 if v_2.Op != OpAMD64FlagGT_UGT {
5777 break
5778 }
5779 v.copyOf(x)
5780 return true
5781 }
5782
5783
5784 for {
5785 x := v_1
5786 if v_2.Op != OpAMD64FlagGT_ULT {
5787 break
5788 }
5789 v.copyOf(x)
5790 return true
5791 }
5792
5793
5794 for {
5795 y := v_0
5796 if v_2.Op != OpAMD64FlagLT_ULT {
5797 break
5798 }
5799 v.copyOf(y)
5800 return true
5801 }
5802
5803
5804 for {
5805 y := v_0
5806 if v_2.Op != OpAMD64FlagLT_UGT {
5807 break
5808 }
5809 v.copyOf(y)
5810 return true
5811 }
5812 return false
5813 }
5814 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
5815 v_2 := v.Args[2]
5816 v_1 := v.Args[1]
5817 v_0 := v.Args[0]
5818
5819
5820 for {
5821 x := v_0
5822 y := v_1
5823 if v_2.Op != OpAMD64InvertFlags {
5824 break
5825 }
5826 cond := v_2.Args[0]
5827 v.reset(OpAMD64CMOVWLT)
5828 v.AddArg3(x, y, cond)
5829 return true
5830 }
5831
5832
5833 for {
5834 y := v_0
5835 if v_2.Op != OpAMD64FlagEQ {
5836 break
5837 }
5838 v.copyOf(y)
5839 return true
5840 }
5841
5842
5843 for {
5844 x := v_1
5845 if v_2.Op != OpAMD64FlagGT_UGT {
5846 break
5847 }
5848 v.copyOf(x)
5849 return true
5850 }
5851
5852
5853 for {
5854 x := v_1
5855 if v_2.Op != OpAMD64FlagGT_ULT {
5856 break
5857 }
5858 v.copyOf(x)
5859 return true
5860 }
5861
5862
5863 for {
5864 y := v_0
5865 if v_2.Op != OpAMD64FlagLT_ULT {
5866 break
5867 }
5868 v.copyOf(y)
5869 return true
5870 }
5871
5872
5873 for {
5874 y := v_0
5875 if v_2.Op != OpAMD64FlagLT_UGT {
5876 break
5877 }
5878 v.copyOf(y)
5879 return true
5880 }
5881 return false
5882 }
5883 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
5884 v_2 := v.Args[2]
5885 v_1 := v.Args[1]
5886 v_0 := v.Args[0]
5887
5888
5889 for {
5890 x := v_0
5891 y := v_1
5892 if v_2.Op != OpAMD64InvertFlags {
5893 break
5894 }
5895 cond := v_2.Args[0]
5896 v.reset(OpAMD64CMOVWCS)
5897 v.AddArg3(x, y, cond)
5898 return true
5899 }
5900
5901
5902 for {
5903 y := v_0
5904 if v_2.Op != OpAMD64FlagEQ {
5905 break
5906 }
5907 v.copyOf(y)
5908 return true
5909 }
5910
5911
5912 for {
5913 x := v_1
5914 if v_2.Op != OpAMD64FlagGT_UGT {
5915 break
5916 }
5917 v.copyOf(x)
5918 return true
5919 }
5920
5921
5922 for {
5923 y := v_0
5924 if v_2.Op != OpAMD64FlagGT_ULT {
5925 break
5926 }
5927 v.copyOf(y)
5928 return true
5929 }
5930
5931
5932 for {
5933 y := v_0
5934 if v_2.Op != OpAMD64FlagLT_ULT {
5935 break
5936 }
5937 v.copyOf(y)
5938 return true
5939 }
5940
5941
5942 for {
5943 x := v_1
5944 if v_2.Op != OpAMD64FlagLT_UGT {
5945 break
5946 }
5947 v.copyOf(x)
5948 return true
5949 }
5950 return false
5951 }
5952 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
5953 v_2 := v.Args[2]
5954 v_1 := v.Args[1]
5955 v_0 := v.Args[0]
5956
5957
5958 for {
5959 x := v_0
5960 y := v_1
5961 if v_2.Op != OpAMD64InvertFlags {
5962 break
5963 }
5964 cond := v_2.Args[0]
5965 v.reset(OpAMD64CMOVWGE)
5966 v.AddArg3(x, y, cond)
5967 return true
5968 }
5969
5970
5971 for {
5972 x := v_1
5973 if v_2.Op != OpAMD64FlagEQ {
5974 break
5975 }
5976 v.copyOf(x)
5977 return true
5978 }
5979
5980
5981 for {
5982 y := v_0
5983 if v_2.Op != OpAMD64FlagGT_UGT {
5984 break
5985 }
5986 v.copyOf(y)
5987 return true
5988 }
5989
5990
5991 for {
5992 y := v_0
5993 if v_2.Op != OpAMD64FlagGT_ULT {
5994 break
5995 }
5996 v.copyOf(y)
5997 return true
5998 }
5999
6000
6001 for {
6002 x := v_1
6003 if v_2.Op != OpAMD64FlagLT_ULT {
6004 break
6005 }
6006 v.copyOf(x)
6007 return true
6008 }
6009
6010
6011 for {
6012 x := v_1
6013 if v_2.Op != OpAMD64FlagLT_UGT {
6014 break
6015 }
6016 v.copyOf(x)
6017 return true
6018 }
6019 return false
6020 }
6021 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6022 v_2 := v.Args[2]
6023 v_1 := v.Args[1]
6024 v_0 := v.Args[0]
6025
6026
6027 for {
6028 x := v_0
6029 y := v_1
6030 if v_2.Op != OpAMD64InvertFlags {
6031 break
6032 }
6033 cond := v_2.Args[0]
6034 v.reset(OpAMD64CMOVWCC)
6035 v.AddArg3(x, y, cond)
6036 return true
6037 }
6038
6039
6040 for {
6041 x := v_1
6042 if v_2.Op != OpAMD64FlagEQ {
6043 break
6044 }
6045 v.copyOf(x)
6046 return true
6047 }
6048
6049
6050 for {
6051 y := v_0
6052 if v_2.Op != OpAMD64FlagGT_UGT {
6053 break
6054 }
6055 v.copyOf(y)
6056 return true
6057 }
6058
6059
6060 for {
6061 x := v_1
6062 if v_2.Op != OpAMD64FlagGT_ULT {
6063 break
6064 }
6065 v.copyOf(x)
6066 return true
6067 }
6068
6069
6070 for {
6071 x := v_1
6072 if v_2.Op != OpAMD64FlagLT_ULT {
6073 break
6074 }
6075 v.copyOf(x)
6076 return true
6077 }
6078
6079
6080 for {
6081 y := v_0
6082 if v_2.Op != OpAMD64FlagLT_UGT {
6083 break
6084 }
6085 v.copyOf(y)
6086 return true
6087 }
6088 return false
6089 }
6090 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6091 v_2 := v.Args[2]
6092 v_1 := v.Args[1]
6093 v_0 := v.Args[0]
6094
6095
6096 for {
6097 x := v_0
6098 y := v_1
6099 if v_2.Op != OpAMD64InvertFlags {
6100 break
6101 }
6102 cond := v_2.Args[0]
6103 v.reset(OpAMD64CMOVWGT)
6104 v.AddArg3(x, y, cond)
6105 return true
6106 }
6107
6108
6109 for {
6110 y := v_0
6111 if v_2.Op != OpAMD64FlagEQ {
6112 break
6113 }
6114 v.copyOf(y)
6115 return true
6116 }
6117
6118
6119 for {
6120 y := v_0
6121 if v_2.Op != OpAMD64FlagGT_UGT {
6122 break
6123 }
6124 v.copyOf(y)
6125 return true
6126 }
6127
6128
6129 for {
6130 y := v_0
6131 if v_2.Op != OpAMD64FlagGT_ULT {
6132 break
6133 }
6134 v.copyOf(y)
6135 return true
6136 }
6137
6138
6139 for {
6140 x := v_1
6141 if v_2.Op != OpAMD64FlagLT_ULT {
6142 break
6143 }
6144 v.copyOf(x)
6145 return true
6146 }
6147
6148
6149 for {
6150 x := v_1
6151 if v_2.Op != OpAMD64FlagLT_UGT {
6152 break
6153 }
6154 v.copyOf(x)
6155 return true
6156 }
6157 return false
6158 }
6159 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6160 v_2 := v.Args[2]
6161 v_1 := v.Args[1]
6162 v_0 := v.Args[0]
6163
6164
6165 for {
6166 x := v_0
6167 y := v_1
6168 if v_2.Op != OpAMD64InvertFlags {
6169 break
6170 }
6171 cond := v_2.Args[0]
6172 v.reset(OpAMD64CMOVWNE)
6173 v.AddArg3(x, y, cond)
6174 return true
6175 }
6176
6177
6178 for {
6179 y := v_0
6180 if v_2.Op != OpAMD64FlagEQ {
6181 break
6182 }
6183 v.copyOf(y)
6184 return true
6185 }
6186
6187
6188 for {
6189 x := v_1
6190 if v_2.Op != OpAMD64FlagGT_UGT {
6191 break
6192 }
6193 v.copyOf(x)
6194 return true
6195 }
6196
6197
6198 for {
6199 x := v_1
6200 if v_2.Op != OpAMD64FlagGT_ULT {
6201 break
6202 }
6203 v.copyOf(x)
6204 return true
6205 }
6206
6207
6208 for {
6209 x := v_1
6210 if v_2.Op != OpAMD64FlagLT_ULT {
6211 break
6212 }
6213 v.copyOf(x)
6214 return true
6215 }
6216
6217
6218 for {
6219 x := v_1
6220 if v_2.Op != OpAMD64FlagLT_UGT {
6221 break
6222 }
6223 v.copyOf(x)
6224 return true
6225 }
6226 return false
6227 }
6228 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6229 v_1 := v.Args[1]
6230 v_0 := v.Args[0]
6231 b := v.Block
6232
6233
6234 for {
6235 x := v_0
6236 if v_1.Op != OpAMD64MOVLconst {
6237 break
6238 }
6239 c := auxIntToInt32(v_1.AuxInt)
6240 v.reset(OpAMD64CMPBconst)
6241 v.AuxInt = int8ToAuxInt(int8(c))
6242 v.AddArg(x)
6243 return true
6244 }
6245
6246
6247 for {
6248 if v_0.Op != OpAMD64MOVLconst {
6249 break
6250 }
6251 c := auxIntToInt32(v_0.AuxInt)
6252 x := v_1
6253 v.reset(OpAMD64InvertFlags)
6254 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6255 v0.AuxInt = int8ToAuxInt(int8(c))
6256 v0.AddArg(x)
6257 v.AddArg(v0)
6258 return true
6259 }
6260
6261
6262
6263 for {
6264 x := v_0
6265 y := v_1
6266 if !(canonLessThan(x, y)) {
6267 break
6268 }
6269 v.reset(OpAMD64InvertFlags)
6270 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6271 v0.AddArg2(y, x)
6272 v.AddArg(v0)
6273 return true
6274 }
6275
6276
6277
6278 for {
6279 l := v_0
6280 if l.Op != OpAMD64MOVBload {
6281 break
6282 }
6283 off := auxIntToInt32(l.AuxInt)
6284 sym := auxToSym(l.Aux)
6285 mem := l.Args[1]
6286 ptr := l.Args[0]
6287 x := v_1
6288 if !(canMergeLoad(v, l) && clobber(l)) {
6289 break
6290 }
6291 v.reset(OpAMD64CMPBload)
6292 v.AuxInt = int32ToAuxInt(off)
6293 v.Aux = symToAux(sym)
6294 v.AddArg3(ptr, x, mem)
6295 return true
6296 }
6297
6298
6299
6300 for {
6301 x := v_0
6302 l := v_1
6303 if l.Op != OpAMD64MOVBload {
6304 break
6305 }
6306 off := auxIntToInt32(l.AuxInt)
6307 sym := auxToSym(l.Aux)
6308 mem := l.Args[1]
6309 ptr := l.Args[0]
6310 if !(canMergeLoad(v, l) && clobber(l)) {
6311 break
6312 }
6313 v.reset(OpAMD64InvertFlags)
6314 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6315 v0.AuxInt = int32ToAuxInt(off)
6316 v0.Aux = symToAux(sym)
6317 v0.AddArg3(ptr, x, mem)
6318 v.AddArg(v0)
6319 return true
6320 }
6321 return false
6322 }
6323 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6324 v_0 := v.Args[0]
6325 b := v.Block
6326
6327
6328
6329 for {
6330 y := auxIntToInt8(v.AuxInt)
6331 if v_0.Op != OpAMD64MOVLconst {
6332 break
6333 }
6334 x := auxIntToInt32(v_0.AuxInt)
6335 if !(int8(x) == y) {
6336 break
6337 }
6338 v.reset(OpAMD64FlagEQ)
6339 return true
6340 }
6341
6342
6343
6344 for {
6345 y := auxIntToInt8(v.AuxInt)
6346 if v_0.Op != OpAMD64MOVLconst {
6347 break
6348 }
6349 x := auxIntToInt32(v_0.AuxInt)
6350 if !(int8(x) < y && uint8(x) < uint8(y)) {
6351 break
6352 }
6353 v.reset(OpAMD64FlagLT_ULT)
6354 return true
6355 }
6356
6357
6358
6359 for {
6360 y := auxIntToInt8(v.AuxInt)
6361 if v_0.Op != OpAMD64MOVLconst {
6362 break
6363 }
6364 x := auxIntToInt32(v_0.AuxInt)
6365 if !(int8(x) < y && uint8(x) > uint8(y)) {
6366 break
6367 }
6368 v.reset(OpAMD64FlagLT_UGT)
6369 return true
6370 }
6371
6372
6373
6374 for {
6375 y := auxIntToInt8(v.AuxInt)
6376 if v_0.Op != OpAMD64MOVLconst {
6377 break
6378 }
6379 x := auxIntToInt32(v_0.AuxInt)
6380 if !(int8(x) > y && uint8(x) < uint8(y)) {
6381 break
6382 }
6383 v.reset(OpAMD64FlagGT_ULT)
6384 return true
6385 }
6386
6387
6388
6389 for {
6390 y := auxIntToInt8(v.AuxInt)
6391 if v_0.Op != OpAMD64MOVLconst {
6392 break
6393 }
6394 x := auxIntToInt32(v_0.AuxInt)
6395 if !(int8(x) > y && uint8(x) > uint8(y)) {
6396 break
6397 }
6398 v.reset(OpAMD64FlagGT_UGT)
6399 return true
6400 }
6401
6402
6403
6404 for {
6405 n := auxIntToInt8(v.AuxInt)
6406 if v_0.Op != OpAMD64ANDLconst {
6407 break
6408 }
6409 m := auxIntToInt32(v_0.AuxInt)
6410 if !(0 <= int8(m) && int8(m) < n) {
6411 break
6412 }
6413 v.reset(OpAMD64FlagLT_ULT)
6414 return true
6415 }
6416
6417
6418
6419 for {
6420 if auxIntToInt8(v.AuxInt) != 0 {
6421 break
6422 }
6423 a := v_0
6424 if a.Op != OpAMD64ANDL {
6425 break
6426 }
6427 y := a.Args[1]
6428 x := a.Args[0]
6429 if !(a.Uses == 1) {
6430 break
6431 }
6432 v.reset(OpAMD64TESTB)
6433 v.AddArg2(x, y)
6434 return true
6435 }
6436
6437
6438
6439 for {
6440 if auxIntToInt8(v.AuxInt) != 0 {
6441 break
6442 }
6443 a := v_0
6444 if a.Op != OpAMD64ANDLconst {
6445 break
6446 }
6447 c := auxIntToInt32(a.AuxInt)
6448 x := a.Args[0]
6449 if !(a.Uses == 1) {
6450 break
6451 }
6452 v.reset(OpAMD64TESTBconst)
6453 v.AuxInt = int8ToAuxInt(int8(c))
6454 v.AddArg(x)
6455 return true
6456 }
6457
6458
6459 for {
6460 if auxIntToInt8(v.AuxInt) != 0 {
6461 break
6462 }
6463 x := v_0
6464 v.reset(OpAMD64TESTB)
6465 v.AddArg2(x, x)
6466 return true
6467 }
6468
6469
6470
6471 for {
6472 c := auxIntToInt8(v.AuxInt)
6473 l := v_0
6474 if l.Op != OpAMD64MOVBload {
6475 break
6476 }
6477 off := auxIntToInt32(l.AuxInt)
6478 sym := auxToSym(l.Aux)
6479 mem := l.Args[1]
6480 ptr := l.Args[0]
6481 if !(l.Uses == 1 && clobber(l)) {
6482 break
6483 }
6484 b = l.Block
6485 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
6486 v.copyOf(v0)
6487 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
6488 v0.Aux = symToAux(sym)
6489 v0.AddArg2(ptr, mem)
6490 return true
6491 }
6492 return false
6493 }
6494 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
6495 v_1 := v.Args[1]
6496 v_0 := v.Args[0]
6497
6498
6499
6500 for {
6501 valoff1 := auxIntToValAndOff(v.AuxInt)
6502 sym := auxToSym(v.Aux)
6503 if v_0.Op != OpAMD64ADDQconst {
6504 break
6505 }
6506 off2 := auxIntToInt32(v_0.AuxInt)
6507 base := v_0.Args[0]
6508 mem := v_1
6509 if !(ValAndOff(valoff1).canAdd32(off2)) {
6510 break
6511 }
6512 v.reset(OpAMD64CMPBconstload)
6513 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6514 v.Aux = symToAux(sym)
6515 v.AddArg2(base, mem)
6516 return true
6517 }
6518
6519
6520
6521 for {
6522 valoff1 := auxIntToValAndOff(v.AuxInt)
6523 sym1 := auxToSym(v.Aux)
6524 if v_0.Op != OpAMD64LEAQ {
6525 break
6526 }
6527 off2 := auxIntToInt32(v_0.AuxInt)
6528 sym2 := auxToSym(v_0.Aux)
6529 base := v_0.Args[0]
6530 mem := v_1
6531 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6532 break
6533 }
6534 v.reset(OpAMD64CMPBconstload)
6535 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6536 v.Aux = symToAux(mergeSym(sym1, sym2))
6537 v.AddArg2(base, mem)
6538 return true
6539 }
6540 return false
6541 }
6542 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
6543 v_2 := v.Args[2]
6544 v_1 := v.Args[1]
6545 v_0 := v.Args[0]
6546
6547
6548
6549 for {
6550 off1 := auxIntToInt32(v.AuxInt)
6551 sym := auxToSym(v.Aux)
6552 if v_0.Op != OpAMD64ADDQconst {
6553 break
6554 }
6555 off2 := auxIntToInt32(v_0.AuxInt)
6556 base := v_0.Args[0]
6557 val := v_1
6558 mem := v_2
6559 if !(is32Bit(int64(off1) + int64(off2))) {
6560 break
6561 }
6562 v.reset(OpAMD64CMPBload)
6563 v.AuxInt = int32ToAuxInt(off1 + off2)
6564 v.Aux = symToAux(sym)
6565 v.AddArg3(base, val, mem)
6566 return true
6567 }
6568
6569
6570
6571 for {
6572 off1 := auxIntToInt32(v.AuxInt)
6573 sym1 := auxToSym(v.Aux)
6574 if v_0.Op != OpAMD64LEAQ {
6575 break
6576 }
6577 off2 := auxIntToInt32(v_0.AuxInt)
6578 sym2 := auxToSym(v_0.Aux)
6579 base := v_0.Args[0]
6580 val := v_1
6581 mem := v_2
6582 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6583 break
6584 }
6585 v.reset(OpAMD64CMPBload)
6586 v.AuxInt = int32ToAuxInt(off1 + off2)
6587 v.Aux = symToAux(mergeSym(sym1, sym2))
6588 v.AddArg3(base, val, mem)
6589 return true
6590 }
6591
6592
6593 for {
6594 off := auxIntToInt32(v.AuxInt)
6595 sym := auxToSym(v.Aux)
6596 ptr := v_0
6597 if v_1.Op != OpAMD64MOVLconst {
6598 break
6599 }
6600 c := auxIntToInt32(v_1.AuxInt)
6601 mem := v_2
6602 v.reset(OpAMD64CMPBconstload)
6603 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
6604 v.Aux = symToAux(sym)
6605 v.AddArg2(ptr, mem)
6606 return true
6607 }
6608 return false
6609 }
6610 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
6611 v_1 := v.Args[1]
6612 v_0 := v.Args[0]
6613 b := v.Block
6614
6615
6616 for {
6617 x := v_0
6618 if v_1.Op != OpAMD64MOVLconst {
6619 break
6620 }
6621 c := auxIntToInt32(v_1.AuxInt)
6622 v.reset(OpAMD64CMPLconst)
6623 v.AuxInt = int32ToAuxInt(c)
6624 v.AddArg(x)
6625 return true
6626 }
6627
6628
6629 for {
6630 if v_0.Op != OpAMD64MOVLconst {
6631 break
6632 }
6633 c := auxIntToInt32(v_0.AuxInt)
6634 x := v_1
6635 v.reset(OpAMD64InvertFlags)
6636 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
6637 v0.AuxInt = int32ToAuxInt(c)
6638 v0.AddArg(x)
6639 v.AddArg(v0)
6640 return true
6641 }
6642
6643
6644
6645 for {
6646 x := v_0
6647 y := v_1
6648 if !(canonLessThan(x, y)) {
6649 break
6650 }
6651 v.reset(OpAMD64InvertFlags)
6652 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
6653 v0.AddArg2(y, x)
6654 v.AddArg(v0)
6655 return true
6656 }
6657
6658
6659
6660 for {
6661 l := v_0
6662 if l.Op != OpAMD64MOVLload {
6663 break
6664 }
6665 off := auxIntToInt32(l.AuxInt)
6666 sym := auxToSym(l.Aux)
6667 mem := l.Args[1]
6668 ptr := l.Args[0]
6669 x := v_1
6670 if !(canMergeLoad(v, l) && clobber(l)) {
6671 break
6672 }
6673 v.reset(OpAMD64CMPLload)
6674 v.AuxInt = int32ToAuxInt(off)
6675 v.Aux = symToAux(sym)
6676 v.AddArg3(ptr, x, mem)
6677 return true
6678 }
6679
6680
6681
6682 for {
6683 x := v_0
6684 l := v_1
6685 if l.Op != OpAMD64MOVLload {
6686 break
6687 }
6688 off := auxIntToInt32(l.AuxInt)
6689 sym := auxToSym(l.Aux)
6690 mem := l.Args[1]
6691 ptr := l.Args[0]
6692 if !(canMergeLoad(v, l) && clobber(l)) {
6693 break
6694 }
6695 v.reset(OpAMD64InvertFlags)
6696 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
6697 v0.AuxInt = int32ToAuxInt(off)
6698 v0.Aux = symToAux(sym)
6699 v0.AddArg3(ptr, x, mem)
6700 v.AddArg(v0)
6701 return true
6702 }
6703 return false
6704 }
6705 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
6706 v_0 := v.Args[0]
6707 b := v.Block
6708
6709
6710
6711 for {
6712 y := auxIntToInt32(v.AuxInt)
6713 if v_0.Op != OpAMD64MOVLconst {
6714 break
6715 }
6716 x := auxIntToInt32(v_0.AuxInt)
6717 if !(x == y) {
6718 break
6719 }
6720 v.reset(OpAMD64FlagEQ)
6721 return true
6722 }
6723
6724
6725
6726 for {
6727 y := auxIntToInt32(v.AuxInt)
6728 if v_0.Op != OpAMD64MOVLconst {
6729 break
6730 }
6731 x := auxIntToInt32(v_0.AuxInt)
6732 if !(x < y && uint32(x) < uint32(y)) {
6733 break
6734 }
6735 v.reset(OpAMD64FlagLT_ULT)
6736 return true
6737 }
6738
6739
6740
6741 for {
6742 y := auxIntToInt32(v.AuxInt)
6743 if v_0.Op != OpAMD64MOVLconst {
6744 break
6745 }
6746 x := auxIntToInt32(v_0.AuxInt)
6747 if !(x < y && uint32(x) > uint32(y)) {
6748 break
6749 }
6750 v.reset(OpAMD64FlagLT_UGT)
6751 return true
6752 }
6753
6754
6755
6756 for {
6757 y := auxIntToInt32(v.AuxInt)
6758 if v_0.Op != OpAMD64MOVLconst {
6759 break
6760 }
6761 x := auxIntToInt32(v_0.AuxInt)
6762 if !(x > y && uint32(x) < uint32(y)) {
6763 break
6764 }
6765 v.reset(OpAMD64FlagGT_ULT)
6766 return true
6767 }
6768
6769
6770
6771 for {
6772 y := auxIntToInt32(v.AuxInt)
6773 if v_0.Op != OpAMD64MOVLconst {
6774 break
6775 }
6776 x := auxIntToInt32(v_0.AuxInt)
6777 if !(x > y && uint32(x) > uint32(y)) {
6778 break
6779 }
6780 v.reset(OpAMD64FlagGT_UGT)
6781 return true
6782 }
6783
6784
6785
6786 for {
6787 n := auxIntToInt32(v.AuxInt)
6788 if v_0.Op != OpAMD64SHRLconst {
6789 break
6790 }
6791 c := auxIntToInt8(v_0.AuxInt)
6792 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
6793 break
6794 }
6795 v.reset(OpAMD64FlagLT_ULT)
6796 return true
6797 }
6798
6799
6800
6801 for {
6802 n := auxIntToInt32(v.AuxInt)
6803 if v_0.Op != OpAMD64ANDLconst {
6804 break
6805 }
6806 m := auxIntToInt32(v_0.AuxInt)
6807 if !(0 <= m && m < n) {
6808 break
6809 }
6810 v.reset(OpAMD64FlagLT_ULT)
6811 return true
6812 }
6813
6814
6815
6816 for {
6817 if auxIntToInt32(v.AuxInt) != 0 {
6818 break
6819 }
6820 a := v_0
6821 if a.Op != OpAMD64ANDL {
6822 break
6823 }
6824 y := a.Args[1]
6825 x := a.Args[0]
6826 if !(a.Uses == 1) {
6827 break
6828 }
6829 v.reset(OpAMD64TESTL)
6830 v.AddArg2(x, y)
6831 return true
6832 }
6833
6834
6835
6836 for {
6837 if auxIntToInt32(v.AuxInt) != 0 {
6838 break
6839 }
6840 a := v_0
6841 if a.Op != OpAMD64ANDLconst {
6842 break
6843 }
6844 c := auxIntToInt32(a.AuxInt)
6845 x := a.Args[0]
6846 if !(a.Uses == 1) {
6847 break
6848 }
6849 v.reset(OpAMD64TESTLconst)
6850 v.AuxInt = int32ToAuxInt(c)
6851 v.AddArg(x)
6852 return true
6853 }
6854
6855
6856 for {
6857 if auxIntToInt32(v.AuxInt) != 0 {
6858 break
6859 }
6860 x := v_0
6861 v.reset(OpAMD64TESTL)
6862 v.AddArg2(x, x)
6863 return true
6864 }
6865
6866
6867
6868 for {
6869 c := auxIntToInt32(v.AuxInt)
6870 l := v_0
6871 if l.Op != OpAMD64MOVLload {
6872 break
6873 }
6874 off := auxIntToInt32(l.AuxInt)
6875 sym := auxToSym(l.Aux)
6876 mem := l.Args[1]
6877 ptr := l.Args[0]
6878 if !(l.Uses == 1 && clobber(l)) {
6879 break
6880 }
6881 b = l.Block
6882 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
6883 v.copyOf(v0)
6884 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6885 v0.Aux = symToAux(sym)
6886 v0.AddArg2(ptr, mem)
6887 return true
6888 }
6889 return false
6890 }
6891 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
6892 v_1 := v.Args[1]
6893 v_0 := v.Args[0]
6894
6895
6896
6897 for {
6898 valoff1 := auxIntToValAndOff(v.AuxInt)
6899 sym := auxToSym(v.Aux)
6900 if v_0.Op != OpAMD64ADDQconst {
6901 break
6902 }
6903 off2 := auxIntToInt32(v_0.AuxInt)
6904 base := v_0.Args[0]
6905 mem := v_1
6906 if !(ValAndOff(valoff1).canAdd32(off2)) {
6907 break
6908 }
6909 v.reset(OpAMD64CMPLconstload)
6910 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6911 v.Aux = symToAux(sym)
6912 v.AddArg2(base, mem)
6913 return true
6914 }
6915
6916
6917
6918 for {
6919 valoff1 := auxIntToValAndOff(v.AuxInt)
6920 sym1 := auxToSym(v.Aux)
6921 if v_0.Op != OpAMD64LEAQ {
6922 break
6923 }
6924 off2 := auxIntToInt32(v_0.AuxInt)
6925 sym2 := auxToSym(v_0.Aux)
6926 base := v_0.Args[0]
6927 mem := v_1
6928 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6929 break
6930 }
6931 v.reset(OpAMD64CMPLconstload)
6932 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6933 v.Aux = symToAux(mergeSym(sym1, sym2))
6934 v.AddArg2(base, mem)
6935 return true
6936 }
6937 return false
6938 }
6939 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
6940 v_2 := v.Args[2]
6941 v_1 := v.Args[1]
6942 v_0 := v.Args[0]
6943
6944
6945
6946 for {
6947 off1 := auxIntToInt32(v.AuxInt)
6948 sym := auxToSym(v.Aux)
6949 if v_0.Op != OpAMD64ADDQconst {
6950 break
6951 }
6952 off2 := auxIntToInt32(v_0.AuxInt)
6953 base := v_0.Args[0]
6954 val := v_1
6955 mem := v_2
6956 if !(is32Bit(int64(off1) + int64(off2))) {
6957 break
6958 }
6959 v.reset(OpAMD64CMPLload)
6960 v.AuxInt = int32ToAuxInt(off1 + off2)
6961 v.Aux = symToAux(sym)
6962 v.AddArg3(base, val, mem)
6963 return true
6964 }
6965
6966
6967
6968 for {
6969 off1 := auxIntToInt32(v.AuxInt)
6970 sym1 := auxToSym(v.Aux)
6971 if v_0.Op != OpAMD64LEAQ {
6972 break
6973 }
6974 off2 := auxIntToInt32(v_0.AuxInt)
6975 sym2 := auxToSym(v_0.Aux)
6976 base := v_0.Args[0]
6977 val := v_1
6978 mem := v_2
6979 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6980 break
6981 }
6982 v.reset(OpAMD64CMPLload)
6983 v.AuxInt = int32ToAuxInt(off1 + off2)
6984 v.Aux = symToAux(mergeSym(sym1, sym2))
6985 v.AddArg3(base, val, mem)
6986 return true
6987 }
6988
6989
6990 for {
6991 off := auxIntToInt32(v.AuxInt)
6992 sym := auxToSym(v.Aux)
6993 ptr := v_0
6994 if v_1.Op != OpAMD64MOVLconst {
6995 break
6996 }
6997 c := auxIntToInt32(v_1.AuxInt)
6998 mem := v_2
6999 v.reset(OpAMD64CMPLconstload)
7000 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7001 v.Aux = symToAux(sym)
7002 v.AddArg2(ptr, mem)
7003 return true
7004 }
7005 return false
7006 }
7007 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
7008 v_1 := v.Args[1]
7009 v_0 := v.Args[0]
7010 b := v.Block
7011
7012
7013
7014 for {
7015 x := v_0
7016 if v_1.Op != OpAMD64MOVQconst {
7017 break
7018 }
7019 c := auxIntToInt64(v_1.AuxInt)
7020 if !(is32Bit(c)) {
7021 break
7022 }
7023 v.reset(OpAMD64CMPQconst)
7024 v.AuxInt = int32ToAuxInt(int32(c))
7025 v.AddArg(x)
7026 return true
7027 }
7028
7029
7030
7031 for {
7032 if v_0.Op != OpAMD64MOVQconst {
7033 break
7034 }
7035 c := auxIntToInt64(v_0.AuxInt)
7036 x := v_1
7037 if !(is32Bit(c)) {
7038 break
7039 }
7040 v.reset(OpAMD64InvertFlags)
7041 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7042 v0.AuxInt = int32ToAuxInt(int32(c))
7043 v0.AddArg(x)
7044 v.AddArg(v0)
7045 return true
7046 }
7047
7048
7049
7050 for {
7051 x := v_0
7052 y := v_1
7053 if !(canonLessThan(x, y)) {
7054 break
7055 }
7056 v.reset(OpAMD64InvertFlags)
7057 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7058 v0.AddArg2(y, x)
7059 v.AddArg(v0)
7060 return true
7061 }
7062
7063
7064
7065 for {
7066 if v_0.Op != OpAMD64MOVQconst {
7067 break
7068 }
7069 x := auxIntToInt64(v_0.AuxInt)
7070 if v_1.Op != OpAMD64MOVQconst {
7071 break
7072 }
7073 y := auxIntToInt64(v_1.AuxInt)
7074 if !(x == y) {
7075 break
7076 }
7077 v.reset(OpAMD64FlagEQ)
7078 return true
7079 }
7080
7081
7082
7083 for {
7084 if v_0.Op != OpAMD64MOVQconst {
7085 break
7086 }
7087 x := auxIntToInt64(v_0.AuxInt)
7088 if v_1.Op != OpAMD64MOVQconst {
7089 break
7090 }
7091 y := auxIntToInt64(v_1.AuxInt)
7092 if !(x < y && uint64(x) < uint64(y)) {
7093 break
7094 }
7095 v.reset(OpAMD64FlagLT_ULT)
7096 return true
7097 }
7098
7099
7100
7101 for {
7102 if v_0.Op != OpAMD64MOVQconst {
7103 break
7104 }
7105 x := auxIntToInt64(v_0.AuxInt)
7106 if v_1.Op != OpAMD64MOVQconst {
7107 break
7108 }
7109 y := auxIntToInt64(v_1.AuxInt)
7110 if !(x < y && uint64(x) > uint64(y)) {
7111 break
7112 }
7113 v.reset(OpAMD64FlagLT_UGT)
7114 return true
7115 }
7116
7117
7118
7119 for {
7120 if v_0.Op != OpAMD64MOVQconst {
7121 break
7122 }
7123 x := auxIntToInt64(v_0.AuxInt)
7124 if v_1.Op != OpAMD64MOVQconst {
7125 break
7126 }
7127 y := auxIntToInt64(v_1.AuxInt)
7128 if !(x > y && uint64(x) < uint64(y)) {
7129 break
7130 }
7131 v.reset(OpAMD64FlagGT_ULT)
7132 return true
7133 }
7134
7135
7136
7137 for {
7138 if v_0.Op != OpAMD64MOVQconst {
7139 break
7140 }
7141 x := auxIntToInt64(v_0.AuxInt)
7142 if v_1.Op != OpAMD64MOVQconst {
7143 break
7144 }
7145 y := auxIntToInt64(v_1.AuxInt)
7146 if !(x > y && uint64(x) > uint64(y)) {
7147 break
7148 }
7149 v.reset(OpAMD64FlagGT_UGT)
7150 return true
7151 }
7152
7153
7154
7155 for {
7156 l := v_0
7157 if l.Op != OpAMD64MOVQload {
7158 break
7159 }
7160 off := auxIntToInt32(l.AuxInt)
7161 sym := auxToSym(l.Aux)
7162 mem := l.Args[1]
7163 ptr := l.Args[0]
7164 x := v_1
7165 if !(canMergeLoad(v, l) && clobber(l)) {
7166 break
7167 }
7168 v.reset(OpAMD64CMPQload)
7169 v.AuxInt = int32ToAuxInt(off)
7170 v.Aux = symToAux(sym)
7171 v.AddArg3(ptr, x, mem)
7172 return true
7173 }
7174
7175
7176
7177 for {
7178 x := v_0
7179 l := v_1
7180 if l.Op != OpAMD64MOVQload {
7181 break
7182 }
7183 off := auxIntToInt32(l.AuxInt)
7184 sym := auxToSym(l.Aux)
7185 mem := l.Args[1]
7186 ptr := l.Args[0]
7187 if !(canMergeLoad(v, l) && clobber(l)) {
7188 break
7189 }
7190 v.reset(OpAMD64InvertFlags)
7191 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7192 v0.AuxInt = int32ToAuxInt(off)
7193 v0.Aux = symToAux(sym)
7194 v0.AddArg3(ptr, x, mem)
7195 v.AddArg(v0)
7196 return true
7197 }
7198 return false
7199 }
7200 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7201 v_0 := v.Args[0]
7202 b := v.Block
7203
7204
7205
7206 for {
7207 y := auxIntToInt32(v.AuxInt)
7208 if v_0.Op != OpAMD64MOVQconst {
7209 break
7210 }
7211 x := auxIntToInt64(v_0.AuxInt)
7212 if !(x == int64(y)) {
7213 break
7214 }
7215 v.reset(OpAMD64FlagEQ)
7216 return true
7217 }
7218
7219
7220
7221 for {
7222 y := auxIntToInt32(v.AuxInt)
7223 if v_0.Op != OpAMD64MOVQconst {
7224 break
7225 }
7226 x := auxIntToInt64(v_0.AuxInt)
7227 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7228 break
7229 }
7230 v.reset(OpAMD64FlagLT_ULT)
7231 return true
7232 }
7233
7234
7235
7236 for {
7237 y := auxIntToInt32(v.AuxInt)
7238 if v_0.Op != OpAMD64MOVQconst {
7239 break
7240 }
7241 x := auxIntToInt64(v_0.AuxInt)
7242 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7243 break
7244 }
7245 v.reset(OpAMD64FlagLT_UGT)
7246 return true
7247 }
7248
7249
7250
7251 for {
7252 y := auxIntToInt32(v.AuxInt)
7253 if v_0.Op != OpAMD64MOVQconst {
7254 break
7255 }
7256 x := auxIntToInt64(v_0.AuxInt)
7257 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7258 break
7259 }
7260 v.reset(OpAMD64FlagGT_ULT)
7261 return true
7262 }
7263
7264
7265
7266 for {
7267 y := auxIntToInt32(v.AuxInt)
7268 if v_0.Op != OpAMD64MOVQconst {
7269 break
7270 }
7271 x := auxIntToInt64(v_0.AuxInt)
7272 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7273 break
7274 }
7275 v.reset(OpAMD64FlagGT_UGT)
7276 return true
7277 }
7278
7279
7280
7281 for {
7282 c := auxIntToInt32(v.AuxInt)
7283 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7284 break
7285 }
7286 v.reset(OpAMD64FlagLT_ULT)
7287 return true
7288 }
7289
7290
7291
7292 for {
7293 c := auxIntToInt32(v.AuxInt)
7294 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7295 break
7296 }
7297 v.reset(OpAMD64FlagLT_ULT)
7298 return true
7299 }
7300
7301
7302
7303 for {
7304 n := auxIntToInt32(v.AuxInt)
7305 if v_0.Op != OpAMD64SHRQconst {
7306 break
7307 }
7308 c := auxIntToInt8(v_0.AuxInt)
7309 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7310 break
7311 }
7312 v.reset(OpAMD64FlagLT_ULT)
7313 return true
7314 }
7315
7316
7317
7318 for {
7319 n := auxIntToInt32(v.AuxInt)
7320 if v_0.Op != OpAMD64ANDQconst {
7321 break
7322 }
7323 m := auxIntToInt32(v_0.AuxInt)
7324 if !(0 <= m && m < n) {
7325 break
7326 }
7327 v.reset(OpAMD64FlagLT_ULT)
7328 return true
7329 }
7330
7331
7332
7333 for {
7334 n := auxIntToInt32(v.AuxInt)
7335 if v_0.Op != OpAMD64ANDLconst {
7336 break
7337 }
7338 m := auxIntToInt32(v_0.AuxInt)
7339 if !(0 <= m && m < n) {
7340 break
7341 }
7342 v.reset(OpAMD64FlagLT_ULT)
7343 return true
7344 }
7345
7346
7347
7348 for {
7349 if auxIntToInt32(v.AuxInt) != 0 {
7350 break
7351 }
7352 a := v_0
7353 if a.Op != OpAMD64ANDQ {
7354 break
7355 }
7356 y := a.Args[1]
7357 x := a.Args[0]
7358 if !(a.Uses == 1) {
7359 break
7360 }
7361 v.reset(OpAMD64TESTQ)
7362 v.AddArg2(x, y)
7363 return true
7364 }
7365
7366
7367
7368 for {
7369 if auxIntToInt32(v.AuxInt) != 0 {
7370 break
7371 }
7372 a := v_0
7373 if a.Op != OpAMD64ANDQconst {
7374 break
7375 }
7376 c := auxIntToInt32(a.AuxInt)
7377 x := a.Args[0]
7378 if !(a.Uses == 1) {
7379 break
7380 }
7381 v.reset(OpAMD64TESTQconst)
7382 v.AuxInt = int32ToAuxInt(c)
7383 v.AddArg(x)
7384 return true
7385 }
7386
7387
7388 for {
7389 if auxIntToInt32(v.AuxInt) != 0 {
7390 break
7391 }
7392 x := v_0
7393 v.reset(OpAMD64TESTQ)
7394 v.AddArg2(x, x)
7395 return true
7396 }
7397
7398
7399
7400 for {
7401 c := auxIntToInt32(v.AuxInt)
7402 l := v_0
7403 if l.Op != OpAMD64MOVQload {
7404 break
7405 }
7406 off := auxIntToInt32(l.AuxInt)
7407 sym := auxToSym(l.Aux)
7408 mem := l.Args[1]
7409 ptr := l.Args[0]
7410 if !(l.Uses == 1 && clobber(l)) {
7411 break
7412 }
7413 b = l.Block
7414 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7415 v.copyOf(v0)
7416 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7417 v0.Aux = symToAux(sym)
7418 v0.AddArg2(ptr, mem)
7419 return true
7420 }
7421 return false
7422 }
7423 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7424 v_1 := v.Args[1]
7425 v_0 := v.Args[0]
7426
7427
7428
7429 for {
7430 valoff1 := auxIntToValAndOff(v.AuxInt)
7431 sym := auxToSym(v.Aux)
7432 if v_0.Op != OpAMD64ADDQconst {
7433 break
7434 }
7435 off2 := auxIntToInt32(v_0.AuxInt)
7436 base := v_0.Args[0]
7437 mem := v_1
7438 if !(ValAndOff(valoff1).canAdd32(off2)) {
7439 break
7440 }
7441 v.reset(OpAMD64CMPQconstload)
7442 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7443 v.Aux = symToAux(sym)
7444 v.AddArg2(base, mem)
7445 return true
7446 }
7447
7448
7449
7450 for {
7451 valoff1 := auxIntToValAndOff(v.AuxInt)
7452 sym1 := auxToSym(v.Aux)
7453 if v_0.Op != OpAMD64LEAQ {
7454 break
7455 }
7456 off2 := auxIntToInt32(v_0.AuxInt)
7457 sym2 := auxToSym(v_0.Aux)
7458 base := v_0.Args[0]
7459 mem := v_1
7460 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7461 break
7462 }
7463 v.reset(OpAMD64CMPQconstload)
7464 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7465 v.Aux = symToAux(mergeSym(sym1, sym2))
7466 v.AddArg2(base, mem)
7467 return true
7468 }
7469 return false
7470 }
7471 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7472 v_2 := v.Args[2]
7473 v_1 := v.Args[1]
7474 v_0 := v.Args[0]
7475
7476
7477
7478 for {
7479 off1 := auxIntToInt32(v.AuxInt)
7480 sym := auxToSym(v.Aux)
7481 if v_0.Op != OpAMD64ADDQconst {
7482 break
7483 }
7484 off2 := auxIntToInt32(v_0.AuxInt)
7485 base := v_0.Args[0]
7486 val := v_1
7487 mem := v_2
7488 if !(is32Bit(int64(off1) + int64(off2))) {
7489 break
7490 }
7491 v.reset(OpAMD64CMPQload)
7492 v.AuxInt = int32ToAuxInt(off1 + off2)
7493 v.Aux = symToAux(sym)
7494 v.AddArg3(base, val, mem)
7495 return true
7496 }
7497
7498
7499
7500 for {
7501 off1 := auxIntToInt32(v.AuxInt)
7502 sym1 := auxToSym(v.Aux)
7503 if v_0.Op != OpAMD64LEAQ {
7504 break
7505 }
7506 off2 := auxIntToInt32(v_0.AuxInt)
7507 sym2 := auxToSym(v_0.Aux)
7508 base := v_0.Args[0]
7509 val := v_1
7510 mem := v_2
7511 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7512 break
7513 }
7514 v.reset(OpAMD64CMPQload)
7515 v.AuxInt = int32ToAuxInt(off1 + off2)
7516 v.Aux = symToAux(mergeSym(sym1, sym2))
7517 v.AddArg3(base, val, mem)
7518 return true
7519 }
7520
7521
7522
7523 for {
7524 off := auxIntToInt32(v.AuxInt)
7525 sym := auxToSym(v.Aux)
7526 ptr := v_0
7527 if v_1.Op != OpAMD64MOVQconst {
7528 break
7529 }
7530 c := auxIntToInt64(v_1.AuxInt)
7531 mem := v_2
7532 if !(validVal(c)) {
7533 break
7534 }
7535 v.reset(OpAMD64CMPQconstload)
7536 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7537 v.Aux = symToAux(sym)
7538 v.AddArg2(ptr, mem)
7539 return true
7540 }
7541 return false
7542 }
7543 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
7544 v_1 := v.Args[1]
7545 v_0 := v.Args[0]
7546 b := v.Block
7547
7548
7549 for {
7550 x := v_0
7551 if v_1.Op != OpAMD64MOVLconst {
7552 break
7553 }
7554 c := auxIntToInt32(v_1.AuxInt)
7555 v.reset(OpAMD64CMPWconst)
7556 v.AuxInt = int16ToAuxInt(int16(c))
7557 v.AddArg(x)
7558 return true
7559 }
7560
7561
7562 for {
7563 if v_0.Op != OpAMD64MOVLconst {
7564 break
7565 }
7566 c := auxIntToInt32(v_0.AuxInt)
7567 x := v_1
7568 v.reset(OpAMD64InvertFlags)
7569 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
7570 v0.AuxInt = int16ToAuxInt(int16(c))
7571 v0.AddArg(x)
7572 v.AddArg(v0)
7573 return true
7574 }
7575
7576
7577
7578 for {
7579 x := v_0
7580 y := v_1
7581 if !(canonLessThan(x, y)) {
7582 break
7583 }
7584 v.reset(OpAMD64InvertFlags)
7585 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
7586 v0.AddArg2(y, x)
7587 v.AddArg(v0)
7588 return true
7589 }
7590
7591
7592
7593 for {
7594 l := v_0
7595 if l.Op != OpAMD64MOVWload {
7596 break
7597 }
7598 off := auxIntToInt32(l.AuxInt)
7599 sym := auxToSym(l.Aux)
7600 mem := l.Args[1]
7601 ptr := l.Args[0]
7602 x := v_1
7603 if !(canMergeLoad(v, l) && clobber(l)) {
7604 break
7605 }
7606 v.reset(OpAMD64CMPWload)
7607 v.AuxInt = int32ToAuxInt(off)
7608 v.Aux = symToAux(sym)
7609 v.AddArg3(ptr, x, mem)
7610 return true
7611 }
7612
7613
7614
7615 for {
7616 x := v_0
7617 l := v_1
7618 if l.Op != OpAMD64MOVWload {
7619 break
7620 }
7621 off := auxIntToInt32(l.AuxInt)
7622 sym := auxToSym(l.Aux)
7623 mem := l.Args[1]
7624 ptr := l.Args[0]
7625 if !(canMergeLoad(v, l) && clobber(l)) {
7626 break
7627 }
7628 v.reset(OpAMD64InvertFlags)
7629 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
7630 v0.AuxInt = int32ToAuxInt(off)
7631 v0.Aux = symToAux(sym)
7632 v0.AddArg3(ptr, x, mem)
7633 v.AddArg(v0)
7634 return true
7635 }
7636 return false
7637 }
7638 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
7639 v_0 := v.Args[0]
7640 b := v.Block
7641
7642
7643
7644 for {
7645 y := auxIntToInt16(v.AuxInt)
7646 if v_0.Op != OpAMD64MOVLconst {
7647 break
7648 }
7649 x := auxIntToInt32(v_0.AuxInt)
7650 if !(int16(x) == y) {
7651 break
7652 }
7653 v.reset(OpAMD64FlagEQ)
7654 return true
7655 }
7656
7657
7658
7659 for {
7660 y := auxIntToInt16(v.AuxInt)
7661 if v_0.Op != OpAMD64MOVLconst {
7662 break
7663 }
7664 x := auxIntToInt32(v_0.AuxInt)
7665 if !(int16(x) < y && uint16(x) < uint16(y)) {
7666 break
7667 }
7668 v.reset(OpAMD64FlagLT_ULT)
7669 return true
7670 }
7671
7672
7673
7674 for {
7675 y := auxIntToInt16(v.AuxInt)
7676 if v_0.Op != OpAMD64MOVLconst {
7677 break
7678 }
7679 x := auxIntToInt32(v_0.AuxInt)
7680 if !(int16(x) < y && uint16(x) > uint16(y)) {
7681 break
7682 }
7683 v.reset(OpAMD64FlagLT_UGT)
7684 return true
7685 }
7686
7687
7688
7689 for {
7690 y := auxIntToInt16(v.AuxInt)
7691 if v_0.Op != OpAMD64MOVLconst {
7692 break
7693 }
7694 x := auxIntToInt32(v_0.AuxInt)
7695 if !(int16(x) > y && uint16(x) < uint16(y)) {
7696 break
7697 }
7698 v.reset(OpAMD64FlagGT_ULT)
7699 return true
7700 }
7701
7702
7703
7704 for {
7705 y := auxIntToInt16(v.AuxInt)
7706 if v_0.Op != OpAMD64MOVLconst {
7707 break
7708 }
7709 x := auxIntToInt32(v_0.AuxInt)
7710 if !(int16(x) > y && uint16(x) > uint16(y)) {
7711 break
7712 }
7713 v.reset(OpAMD64FlagGT_UGT)
7714 return true
7715 }
7716
7717
7718
7719 for {
7720 n := auxIntToInt16(v.AuxInt)
7721 if v_0.Op != OpAMD64ANDLconst {
7722 break
7723 }
7724 m := auxIntToInt32(v_0.AuxInt)
7725 if !(0 <= int16(m) && int16(m) < n) {
7726 break
7727 }
7728 v.reset(OpAMD64FlagLT_ULT)
7729 return true
7730 }
7731
7732
7733
7734 for {
7735 if auxIntToInt16(v.AuxInt) != 0 {
7736 break
7737 }
7738 a := v_0
7739 if a.Op != OpAMD64ANDL {
7740 break
7741 }
7742 y := a.Args[1]
7743 x := a.Args[0]
7744 if !(a.Uses == 1) {
7745 break
7746 }
7747 v.reset(OpAMD64TESTW)
7748 v.AddArg2(x, y)
7749 return true
7750 }
7751
7752
7753
7754 for {
7755 if auxIntToInt16(v.AuxInt) != 0 {
7756 break
7757 }
7758 a := v_0
7759 if a.Op != OpAMD64ANDLconst {
7760 break
7761 }
7762 c := auxIntToInt32(a.AuxInt)
7763 x := a.Args[0]
7764 if !(a.Uses == 1) {
7765 break
7766 }
7767 v.reset(OpAMD64TESTWconst)
7768 v.AuxInt = int16ToAuxInt(int16(c))
7769 v.AddArg(x)
7770 return true
7771 }
7772
7773
7774 for {
7775 if auxIntToInt16(v.AuxInt) != 0 {
7776 break
7777 }
7778 x := v_0
7779 v.reset(OpAMD64TESTW)
7780 v.AddArg2(x, x)
7781 return true
7782 }
7783
7784
7785
7786 for {
7787 c := auxIntToInt16(v.AuxInt)
7788 l := v_0
7789 if l.Op != OpAMD64MOVWload {
7790 break
7791 }
7792 off := auxIntToInt32(l.AuxInt)
7793 sym := auxToSym(l.Aux)
7794 mem := l.Args[1]
7795 ptr := l.Args[0]
7796 if !(l.Uses == 1 && clobber(l)) {
7797 break
7798 }
7799 b = l.Block
7800 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
7801 v.copyOf(v0)
7802 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7803 v0.Aux = symToAux(sym)
7804 v0.AddArg2(ptr, mem)
7805 return true
7806 }
7807 return false
7808 }
7809 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
7810 v_1 := v.Args[1]
7811 v_0 := v.Args[0]
7812
7813
7814
7815 for {
7816 valoff1 := auxIntToValAndOff(v.AuxInt)
7817 sym := auxToSym(v.Aux)
7818 if v_0.Op != OpAMD64ADDQconst {
7819 break
7820 }
7821 off2 := auxIntToInt32(v_0.AuxInt)
7822 base := v_0.Args[0]
7823 mem := v_1
7824 if !(ValAndOff(valoff1).canAdd32(off2)) {
7825 break
7826 }
7827 v.reset(OpAMD64CMPWconstload)
7828 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7829 v.Aux = symToAux(sym)
7830 v.AddArg2(base, mem)
7831 return true
7832 }
7833
7834
7835
7836 for {
7837 valoff1 := auxIntToValAndOff(v.AuxInt)
7838 sym1 := auxToSym(v.Aux)
7839 if v_0.Op != OpAMD64LEAQ {
7840 break
7841 }
7842 off2 := auxIntToInt32(v_0.AuxInt)
7843 sym2 := auxToSym(v_0.Aux)
7844 base := v_0.Args[0]
7845 mem := v_1
7846 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7847 break
7848 }
7849 v.reset(OpAMD64CMPWconstload)
7850 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7851 v.Aux = symToAux(mergeSym(sym1, sym2))
7852 v.AddArg2(base, mem)
7853 return true
7854 }
7855 return false
7856 }
7857 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
7858 v_2 := v.Args[2]
7859 v_1 := v.Args[1]
7860 v_0 := v.Args[0]
7861
7862
7863
7864 for {
7865 off1 := auxIntToInt32(v.AuxInt)
7866 sym := auxToSym(v.Aux)
7867 if v_0.Op != OpAMD64ADDQconst {
7868 break
7869 }
7870 off2 := auxIntToInt32(v_0.AuxInt)
7871 base := v_0.Args[0]
7872 val := v_1
7873 mem := v_2
7874 if !(is32Bit(int64(off1) + int64(off2))) {
7875 break
7876 }
7877 v.reset(OpAMD64CMPWload)
7878 v.AuxInt = int32ToAuxInt(off1 + off2)
7879 v.Aux = symToAux(sym)
7880 v.AddArg3(base, val, mem)
7881 return true
7882 }
7883
7884
7885
7886 for {
7887 off1 := auxIntToInt32(v.AuxInt)
7888 sym1 := auxToSym(v.Aux)
7889 if v_0.Op != OpAMD64LEAQ {
7890 break
7891 }
7892 off2 := auxIntToInt32(v_0.AuxInt)
7893 sym2 := auxToSym(v_0.Aux)
7894 base := v_0.Args[0]
7895 val := v_1
7896 mem := v_2
7897 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7898 break
7899 }
7900 v.reset(OpAMD64CMPWload)
7901 v.AuxInt = int32ToAuxInt(off1 + off2)
7902 v.Aux = symToAux(mergeSym(sym1, sym2))
7903 v.AddArg3(base, val, mem)
7904 return true
7905 }
7906
7907
7908 for {
7909 off := auxIntToInt32(v.AuxInt)
7910 sym := auxToSym(v.Aux)
7911 ptr := v_0
7912 if v_1.Op != OpAMD64MOVLconst {
7913 break
7914 }
7915 c := auxIntToInt32(v_1.AuxInt)
7916 mem := v_2
7917 v.reset(OpAMD64CMPWconstload)
7918 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
7919 v.Aux = symToAux(sym)
7920 v.AddArg2(ptr, mem)
7921 return true
7922 }
7923 return false
7924 }
7925 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
7926 v_3 := v.Args[3]
7927 v_2 := v.Args[2]
7928 v_1 := v.Args[1]
7929 v_0 := v.Args[0]
7930
7931
7932
7933 for {
7934 off1 := auxIntToInt32(v.AuxInt)
7935 sym := auxToSym(v.Aux)
7936 if v_0.Op != OpAMD64ADDQconst {
7937 break
7938 }
7939 off2 := auxIntToInt32(v_0.AuxInt)
7940 ptr := v_0.Args[0]
7941 old := v_1
7942 new_ := v_2
7943 mem := v_3
7944 if !(is32Bit(int64(off1) + int64(off2))) {
7945 break
7946 }
7947 v.reset(OpAMD64CMPXCHGLlock)
7948 v.AuxInt = int32ToAuxInt(off1 + off2)
7949 v.Aux = symToAux(sym)
7950 v.AddArg4(ptr, old, new_, mem)
7951 return true
7952 }
7953 return false
7954 }
7955 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
7956 v_3 := v.Args[3]
7957 v_2 := v.Args[2]
7958 v_1 := v.Args[1]
7959 v_0 := v.Args[0]
7960
7961
7962
7963 for {
7964 off1 := auxIntToInt32(v.AuxInt)
7965 sym := auxToSym(v.Aux)
7966 if v_0.Op != OpAMD64ADDQconst {
7967 break
7968 }
7969 off2 := auxIntToInt32(v_0.AuxInt)
7970 ptr := v_0.Args[0]
7971 old := v_1
7972 new_ := v_2
7973 mem := v_3
7974 if !(is32Bit(int64(off1) + int64(off2))) {
7975 break
7976 }
7977 v.reset(OpAMD64CMPXCHGQlock)
7978 v.AuxInt = int32ToAuxInt(off1 + off2)
7979 v.Aux = symToAux(sym)
7980 v.AddArg4(ptr, old, new_, mem)
7981 return true
7982 }
7983 return false
7984 }
7985 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
7986 v_1 := v.Args[1]
7987 v_0 := v.Args[0]
7988
7989
7990
7991 for {
7992 x := v_0
7993 l := v_1
7994 if l.Op != OpAMD64MOVSDload {
7995 break
7996 }
7997 off := auxIntToInt32(l.AuxInt)
7998 sym := auxToSym(l.Aux)
7999 mem := l.Args[1]
8000 ptr := l.Args[0]
8001 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8002 break
8003 }
8004 v.reset(OpAMD64DIVSDload)
8005 v.AuxInt = int32ToAuxInt(off)
8006 v.Aux = symToAux(sym)
8007 v.AddArg3(x, ptr, mem)
8008 return true
8009 }
8010 return false
8011 }
8012 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8013 v_2 := v.Args[2]
8014 v_1 := v.Args[1]
8015 v_0 := v.Args[0]
8016
8017
8018
8019 for {
8020 off1 := auxIntToInt32(v.AuxInt)
8021 sym := auxToSym(v.Aux)
8022 val := v_0
8023 if v_1.Op != OpAMD64ADDQconst {
8024 break
8025 }
8026 off2 := auxIntToInt32(v_1.AuxInt)
8027 base := v_1.Args[0]
8028 mem := v_2
8029 if !(is32Bit(int64(off1) + int64(off2))) {
8030 break
8031 }
8032 v.reset(OpAMD64DIVSDload)
8033 v.AuxInt = int32ToAuxInt(off1 + off2)
8034 v.Aux = symToAux(sym)
8035 v.AddArg3(val, base, mem)
8036 return true
8037 }
8038
8039
8040
8041 for {
8042 off1 := auxIntToInt32(v.AuxInt)
8043 sym1 := auxToSym(v.Aux)
8044 val := v_0
8045 if v_1.Op != OpAMD64LEAQ {
8046 break
8047 }
8048 off2 := auxIntToInt32(v_1.AuxInt)
8049 sym2 := auxToSym(v_1.Aux)
8050 base := v_1.Args[0]
8051 mem := v_2
8052 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8053 break
8054 }
8055 v.reset(OpAMD64DIVSDload)
8056 v.AuxInt = int32ToAuxInt(off1 + off2)
8057 v.Aux = symToAux(mergeSym(sym1, sym2))
8058 v.AddArg3(val, base, mem)
8059 return true
8060 }
8061 return false
8062 }
8063 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8064 v_1 := v.Args[1]
8065 v_0 := v.Args[0]
8066
8067
8068
8069 for {
8070 x := v_0
8071 l := v_1
8072 if l.Op != OpAMD64MOVSSload {
8073 break
8074 }
8075 off := auxIntToInt32(l.AuxInt)
8076 sym := auxToSym(l.Aux)
8077 mem := l.Args[1]
8078 ptr := l.Args[0]
8079 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8080 break
8081 }
8082 v.reset(OpAMD64DIVSSload)
8083 v.AuxInt = int32ToAuxInt(off)
8084 v.Aux = symToAux(sym)
8085 v.AddArg3(x, ptr, mem)
8086 return true
8087 }
8088 return false
8089 }
8090 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8091 v_2 := v.Args[2]
8092 v_1 := v.Args[1]
8093 v_0 := v.Args[0]
8094
8095
8096
8097 for {
8098 off1 := auxIntToInt32(v.AuxInt)
8099 sym := auxToSym(v.Aux)
8100 val := v_0
8101 if v_1.Op != OpAMD64ADDQconst {
8102 break
8103 }
8104 off2 := auxIntToInt32(v_1.AuxInt)
8105 base := v_1.Args[0]
8106 mem := v_2
8107 if !(is32Bit(int64(off1) + int64(off2))) {
8108 break
8109 }
8110 v.reset(OpAMD64DIVSSload)
8111 v.AuxInt = int32ToAuxInt(off1 + off2)
8112 v.Aux = symToAux(sym)
8113 v.AddArg3(val, base, mem)
8114 return true
8115 }
8116
8117
8118
8119 for {
8120 off1 := auxIntToInt32(v.AuxInt)
8121 sym1 := auxToSym(v.Aux)
8122 val := v_0
8123 if v_1.Op != OpAMD64LEAQ {
8124 break
8125 }
8126 off2 := auxIntToInt32(v_1.AuxInt)
8127 sym2 := auxToSym(v_1.Aux)
8128 base := v_1.Args[0]
8129 mem := v_2
8130 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8131 break
8132 }
8133 v.reset(OpAMD64DIVSSload)
8134 v.AuxInt = int32ToAuxInt(off1 + off2)
8135 v.Aux = symToAux(mergeSym(sym1, sym2))
8136 v.AddArg3(val, base, mem)
8137 return true
8138 }
8139 return false
8140 }
8141 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8142 v_1 := v.Args[1]
8143 v_0 := v.Args[0]
8144
8145
8146
8147 for {
8148 x := v_0
8149 y := v_1
8150 if !(!x.rematerializeable() && y.rematerializeable()) {
8151 break
8152 }
8153 v.reset(OpAMD64HMULL)
8154 v.AddArg2(y, x)
8155 return true
8156 }
8157 return false
8158 }
8159 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8160 v_1 := v.Args[1]
8161 v_0 := v.Args[0]
8162
8163
8164
8165 for {
8166 x := v_0
8167 y := v_1
8168 if !(!x.rematerializeable() && y.rematerializeable()) {
8169 break
8170 }
8171 v.reset(OpAMD64HMULLU)
8172 v.AddArg2(y, x)
8173 return true
8174 }
8175 return false
8176 }
8177 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8178 v_1 := v.Args[1]
8179 v_0 := v.Args[0]
8180
8181
8182
8183 for {
8184 x := v_0
8185 y := v_1
8186 if !(!x.rematerializeable() && y.rematerializeable()) {
8187 break
8188 }
8189 v.reset(OpAMD64HMULQ)
8190 v.AddArg2(y, x)
8191 return true
8192 }
8193 return false
8194 }
8195 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8196 v_1 := v.Args[1]
8197 v_0 := v.Args[0]
8198
8199
8200
8201 for {
8202 x := v_0
8203 y := v_1
8204 if !(!x.rematerializeable() && y.rematerializeable()) {
8205 break
8206 }
8207 v.reset(OpAMD64HMULQU)
8208 v.AddArg2(y, x)
8209 return true
8210 }
8211 return false
8212 }
8213 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8214 v_0 := v.Args[0]
8215
8216
8217
8218 for {
8219 c := auxIntToInt32(v.AuxInt)
8220 s := auxToSym(v.Aux)
8221 if v_0.Op != OpAMD64ADDLconst {
8222 break
8223 }
8224 d := auxIntToInt32(v_0.AuxInt)
8225 x := v_0.Args[0]
8226 if !(is32Bit(int64(c) + int64(d))) {
8227 break
8228 }
8229 v.reset(OpAMD64LEAL)
8230 v.AuxInt = int32ToAuxInt(c + d)
8231 v.Aux = symToAux(s)
8232 v.AddArg(x)
8233 return true
8234 }
8235
8236
8237
8238 for {
8239 c := auxIntToInt32(v.AuxInt)
8240 s := auxToSym(v.Aux)
8241 if v_0.Op != OpAMD64ADDL {
8242 break
8243 }
8244 _ = v_0.Args[1]
8245 v_0_0 := v_0.Args[0]
8246 v_0_1 := v_0.Args[1]
8247 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8248 x := v_0_0
8249 y := v_0_1
8250 if !(x.Op != OpSB && y.Op != OpSB) {
8251 continue
8252 }
8253 v.reset(OpAMD64LEAL1)
8254 v.AuxInt = int32ToAuxInt(c)
8255 v.Aux = symToAux(s)
8256 v.AddArg2(x, y)
8257 return true
8258 }
8259 break
8260 }
8261 return false
8262 }
8263 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8264 v_1 := v.Args[1]
8265 v_0 := v.Args[0]
8266
8267
8268
8269 for {
8270 c := auxIntToInt32(v.AuxInt)
8271 s := auxToSym(v.Aux)
8272 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8273 if v_0.Op != OpAMD64ADDLconst {
8274 continue
8275 }
8276 d := auxIntToInt32(v_0.AuxInt)
8277 x := v_0.Args[0]
8278 y := v_1
8279 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8280 continue
8281 }
8282 v.reset(OpAMD64LEAL1)
8283 v.AuxInt = int32ToAuxInt(c + d)
8284 v.Aux = symToAux(s)
8285 v.AddArg2(x, y)
8286 return true
8287 }
8288 break
8289 }
8290
8291
8292 for {
8293 c := auxIntToInt32(v.AuxInt)
8294 s := auxToSym(v.Aux)
8295 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8296 x := v_0
8297 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8298 continue
8299 }
8300 y := v_1.Args[0]
8301 v.reset(OpAMD64LEAL2)
8302 v.AuxInt = int32ToAuxInt(c)
8303 v.Aux = symToAux(s)
8304 v.AddArg2(x, y)
8305 return true
8306 }
8307 break
8308 }
8309
8310
8311 for {
8312 c := auxIntToInt32(v.AuxInt)
8313 s := auxToSym(v.Aux)
8314 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8315 x := v_0
8316 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8317 continue
8318 }
8319 y := v_1.Args[0]
8320 v.reset(OpAMD64LEAL4)
8321 v.AuxInt = int32ToAuxInt(c)
8322 v.Aux = symToAux(s)
8323 v.AddArg2(x, y)
8324 return true
8325 }
8326 break
8327 }
8328
8329
8330 for {
8331 c := auxIntToInt32(v.AuxInt)
8332 s := auxToSym(v.Aux)
8333 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8334 x := v_0
8335 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8336 continue
8337 }
8338 y := v_1.Args[0]
8339 v.reset(OpAMD64LEAL8)
8340 v.AuxInt = int32ToAuxInt(c)
8341 v.Aux = symToAux(s)
8342 v.AddArg2(x, y)
8343 return true
8344 }
8345 break
8346 }
8347 return false
8348 }
8349 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8350 v_1 := v.Args[1]
8351 v_0 := v.Args[0]
8352
8353
8354
8355 for {
8356 c := auxIntToInt32(v.AuxInt)
8357 s := auxToSym(v.Aux)
8358 if v_0.Op != OpAMD64ADDLconst {
8359 break
8360 }
8361 d := auxIntToInt32(v_0.AuxInt)
8362 x := v_0.Args[0]
8363 y := v_1
8364 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8365 break
8366 }
8367 v.reset(OpAMD64LEAL2)
8368 v.AuxInt = int32ToAuxInt(c + d)
8369 v.Aux = symToAux(s)
8370 v.AddArg2(x, y)
8371 return true
8372 }
8373
8374
8375
8376 for {
8377 c := auxIntToInt32(v.AuxInt)
8378 s := auxToSym(v.Aux)
8379 x := v_0
8380 if v_1.Op != OpAMD64ADDLconst {
8381 break
8382 }
8383 d := auxIntToInt32(v_1.AuxInt)
8384 y := v_1.Args[0]
8385 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8386 break
8387 }
8388 v.reset(OpAMD64LEAL2)
8389 v.AuxInt = int32ToAuxInt(c + 2*d)
8390 v.Aux = symToAux(s)
8391 v.AddArg2(x, y)
8392 return true
8393 }
8394
8395
8396 for {
8397 c := auxIntToInt32(v.AuxInt)
8398 s := auxToSym(v.Aux)
8399 x := v_0
8400 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8401 break
8402 }
8403 y := v_1.Args[0]
8404 v.reset(OpAMD64LEAL4)
8405 v.AuxInt = int32ToAuxInt(c)
8406 v.Aux = symToAux(s)
8407 v.AddArg2(x, y)
8408 return true
8409 }
8410
8411
8412 for {
8413 c := auxIntToInt32(v.AuxInt)
8414 s := auxToSym(v.Aux)
8415 x := v_0
8416 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8417 break
8418 }
8419 y := v_1.Args[0]
8420 v.reset(OpAMD64LEAL8)
8421 v.AuxInt = int32ToAuxInt(c)
8422 v.Aux = symToAux(s)
8423 v.AddArg2(x, y)
8424 return true
8425 }
8426 return false
8427 }
8428 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
8429 v_1 := v.Args[1]
8430 v_0 := v.Args[0]
8431
8432
8433
8434 for {
8435 c := auxIntToInt32(v.AuxInt)
8436 s := auxToSym(v.Aux)
8437 if v_0.Op != OpAMD64ADDLconst {
8438 break
8439 }
8440 d := auxIntToInt32(v_0.AuxInt)
8441 x := v_0.Args[0]
8442 y := v_1
8443 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8444 break
8445 }
8446 v.reset(OpAMD64LEAL4)
8447 v.AuxInt = int32ToAuxInt(c + d)
8448 v.Aux = symToAux(s)
8449 v.AddArg2(x, y)
8450 return true
8451 }
8452
8453
8454
8455 for {
8456 c := auxIntToInt32(v.AuxInt)
8457 s := auxToSym(v.Aux)
8458 x := v_0
8459 if v_1.Op != OpAMD64ADDLconst {
8460 break
8461 }
8462 d := auxIntToInt32(v_1.AuxInt)
8463 y := v_1.Args[0]
8464 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
8465 break
8466 }
8467 v.reset(OpAMD64LEAL4)
8468 v.AuxInt = int32ToAuxInt(c + 4*d)
8469 v.Aux = symToAux(s)
8470 v.AddArg2(x, y)
8471 return true
8472 }
8473
8474
8475 for {
8476 c := auxIntToInt32(v.AuxInt)
8477 s := auxToSym(v.Aux)
8478 x := v_0
8479 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8480 break
8481 }
8482 y := v_1.Args[0]
8483 v.reset(OpAMD64LEAL8)
8484 v.AuxInt = int32ToAuxInt(c)
8485 v.Aux = symToAux(s)
8486 v.AddArg2(x, y)
8487 return true
8488 }
8489 return false
8490 }
8491 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
8492 v_1 := v.Args[1]
8493 v_0 := v.Args[0]
8494
8495
8496
8497 for {
8498 c := auxIntToInt32(v.AuxInt)
8499 s := auxToSym(v.Aux)
8500 if v_0.Op != OpAMD64ADDLconst {
8501 break
8502 }
8503 d := auxIntToInt32(v_0.AuxInt)
8504 x := v_0.Args[0]
8505 y := v_1
8506 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8507 break
8508 }
8509 v.reset(OpAMD64LEAL8)
8510 v.AuxInt = int32ToAuxInt(c + d)
8511 v.Aux = symToAux(s)
8512 v.AddArg2(x, y)
8513 return true
8514 }
8515
8516
8517
8518 for {
8519 c := auxIntToInt32(v.AuxInt)
8520 s := auxToSym(v.Aux)
8521 x := v_0
8522 if v_1.Op != OpAMD64ADDLconst {
8523 break
8524 }
8525 d := auxIntToInt32(v_1.AuxInt)
8526 y := v_1.Args[0]
8527 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
8528 break
8529 }
8530 v.reset(OpAMD64LEAL8)
8531 v.AuxInt = int32ToAuxInt(c + 8*d)
8532 v.Aux = symToAux(s)
8533 v.AddArg2(x, y)
8534 return true
8535 }
8536 return false
8537 }
8538 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
8539 v_0 := v.Args[0]
8540
8541
8542
8543 for {
8544 c := auxIntToInt32(v.AuxInt)
8545 s := auxToSym(v.Aux)
8546 if v_0.Op != OpAMD64ADDQconst {
8547 break
8548 }
8549 d := auxIntToInt32(v_0.AuxInt)
8550 x := v_0.Args[0]
8551 if !(is32Bit(int64(c) + int64(d))) {
8552 break
8553 }
8554 v.reset(OpAMD64LEAQ)
8555 v.AuxInt = int32ToAuxInt(c + d)
8556 v.Aux = symToAux(s)
8557 v.AddArg(x)
8558 return true
8559 }
8560
8561
8562
8563 for {
8564 c := auxIntToInt32(v.AuxInt)
8565 s := auxToSym(v.Aux)
8566 if v_0.Op != OpAMD64ADDQ {
8567 break
8568 }
8569 _ = v_0.Args[1]
8570 v_0_0 := v_0.Args[0]
8571 v_0_1 := v_0.Args[1]
8572 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8573 x := v_0_0
8574 y := v_0_1
8575 if !(x.Op != OpSB && y.Op != OpSB) {
8576 continue
8577 }
8578 v.reset(OpAMD64LEAQ1)
8579 v.AuxInt = int32ToAuxInt(c)
8580 v.Aux = symToAux(s)
8581 v.AddArg2(x, y)
8582 return true
8583 }
8584 break
8585 }
8586
8587
8588
8589 for {
8590 off1 := auxIntToInt32(v.AuxInt)
8591 sym1 := auxToSym(v.Aux)
8592 if v_0.Op != OpAMD64LEAQ {
8593 break
8594 }
8595 off2 := auxIntToInt32(v_0.AuxInt)
8596 sym2 := auxToSym(v_0.Aux)
8597 x := v_0.Args[0]
8598 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8599 break
8600 }
8601 v.reset(OpAMD64LEAQ)
8602 v.AuxInt = int32ToAuxInt(off1 + off2)
8603 v.Aux = symToAux(mergeSym(sym1, sym2))
8604 v.AddArg(x)
8605 return true
8606 }
8607
8608
8609
8610 for {
8611 off1 := auxIntToInt32(v.AuxInt)
8612 sym1 := auxToSym(v.Aux)
8613 if v_0.Op != OpAMD64LEAQ1 {
8614 break
8615 }
8616 off2 := auxIntToInt32(v_0.AuxInt)
8617 sym2 := auxToSym(v_0.Aux)
8618 y := v_0.Args[1]
8619 x := v_0.Args[0]
8620 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8621 break
8622 }
8623 v.reset(OpAMD64LEAQ1)
8624 v.AuxInt = int32ToAuxInt(off1 + off2)
8625 v.Aux = symToAux(mergeSym(sym1, sym2))
8626 v.AddArg2(x, y)
8627 return true
8628 }
8629
8630
8631
8632 for {
8633 off1 := auxIntToInt32(v.AuxInt)
8634 sym1 := auxToSym(v.Aux)
8635 if v_0.Op != OpAMD64LEAQ2 {
8636 break
8637 }
8638 off2 := auxIntToInt32(v_0.AuxInt)
8639 sym2 := auxToSym(v_0.Aux)
8640 y := v_0.Args[1]
8641 x := v_0.Args[0]
8642 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8643 break
8644 }
8645 v.reset(OpAMD64LEAQ2)
8646 v.AuxInt = int32ToAuxInt(off1 + off2)
8647 v.Aux = symToAux(mergeSym(sym1, sym2))
8648 v.AddArg2(x, y)
8649 return true
8650 }
8651
8652
8653
8654 for {
8655 off1 := auxIntToInt32(v.AuxInt)
8656 sym1 := auxToSym(v.Aux)
8657 if v_0.Op != OpAMD64LEAQ4 {
8658 break
8659 }
8660 off2 := auxIntToInt32(v_0.AuxInt)
8661 sym2 := auxToSym(v_0.Aux)
8662 y := v_0.Args[1]
8663 x := v_0.Args[0]
8664 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8665 break
8666 }
8667 v.reset(OpAMD64LEAQ4)
8668 v.AuxInt = int32ToAuxInt(off1 + off2)
8669 v.Aux = symToAux(mergeSym(sym1, sym2))
8670 v.AddArg2(x, y)
8671 return true
8672 }
8673
8674
8675
8676 for {
8677 off1 := auxIntToInt32(v.AuxInt)
8678 sym1 := auxToSym(v.Aux)
8679 if v_0.Op != OpAMD64LEAQ8 {
8680 break
8681 }
8682 off2 := auxIntToInt32(v_0.AuxInt)
8683 sym2 := auxToSym(v_0.Aux)
8684 y := v_0.Args[1]
8685 x := v_0.Args[0]
8686 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8687 break
8688 }
8689 v.reset(OpAMD64LEAQ8)
8690 v.AuxInt = int32ToAuxInt(off1 + off2)
8691 v.Aux = symToAux(mergeSym(sym1, sym2))
8692 v.AddArg2(x, y)
8693 return true
8694 }
8695 return false
8696 }
8697 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
8698 v_1 := v.Args[1]
8699 v_0 := v.Args[0]
8700
8701
8702
8703 for {
8704 c := auxIntToInt32(v.AuxInt)
8705 s := auxToSym(v.Aux)
8706 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8707 if v_0.Op != OpAMD64ADDQconst {
8708 continue
8709 }
8710 d := auxIntToInt32(v_0.AuxInt)
8711 x := v_0.Args[0]
8712 y := v_1
8713 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8714 continue
8715 }
8716 v.reset(OpAMD64LEAQ1)
8717 v.AuxInt = int32ToAuxInt(c + d)
8718 v.Aux = symToAux(s)
8719 v.AddArg2(x, y)
8720 return true
8721 }
8722 break
8723 }
8724
8725
8726 for {
8727 c := auxIntToInt32(v.AuxInt)
8728 s := auxToSym(v.Aux)
8729 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8730 x := v_0
8731 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
8732 continue
8733 }
8734 y := v_1.Args[0]
8735 v.reset(OpAMD64LEAQ2)
8736 v.AuxInt = int32ToAuxInt(c)
8737 v.Aux = symToAux(s)
8738 v.AddArg2(x, y)
8739 return true
8740 }
8741 break
8742 }
8743
8744
8745 for {
8746 c := auxIntToInt32(v.AuxInt)
8747 s := auxToSym(v.Aux)
8748 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8749 x := v_0
8750 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
8751 continue
8752 }
8753 y := v_1.Args[0]
8754 v.reset(OpAMD64LEAQ4)
8755 v.AuxInt = int32ToAuxInt(c)
8756 v.Aux = symToAux(s)
8757 v.AddArg2(x, y)
8758 return true
8759 }
8760 break
8761 }
8762
8763
8764 for {
8765 c := auxIntToInt32(v.AuxInt)
8766 s := auxToSym(v.Aux)
8767 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8768 x := v_0
8769 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
8770 continue
8771 }
8772 y := v_1.Args[0]
8773 v.reset(OpAMD64LEAQ8)
8774 v.AuxInt = int32ToAuxInt(c)
8775 v.Aux = symToAux(s)
8776 v.AddArg2(x, y)
8777 return true
8778 }
8779 break
8780 }
8781
8782
8783
8784 for {
8785 off1 := auxIntToInt32(v.AuxInt)
8786 sym1 := auxToSym(v.Aux)
8787 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8788 if v_0.Op != OpAMD64LEAQ {
8789 continue
8790 }
8791 off2 := auxIntToInt32(v_0.AuxInt)
8792 sym2 := auxToSym(v_0.Aux)
8793 x := v_0.Args[0]
8794 y := v_1
8795 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
8796 continue
8797 }
8798 v.reset(OpAMD64LEAQ1)
8799 v.AuxInt = int32ToAuxInt(off1 + off2)
8800 v.Aux = symToAux(mergeSym(sym1, sym2))
8801 v.AddArg2(x, y)
8802 return true
8803 }
8804 break
8805 }
8806
8807
8808
8809 for {
8810 off1 := auxIntToInt32(v.AuxInt)
8811 sym1 := auxToSym(v.Aux)
8812 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8813 x := v_0
8814 if v_1.Op != OpAMD64LEAQ1 {
8815 continue
8816 }
8817 off2 := auxIntToInt32(v_1.AuxInt)
8818 sym2 := auxToSym(v_1.Aux)
8819 y := v_1.Args[1]
8820 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8821 continue
8822 }
8823 v.reset(OpAMD64LEAQ2)
8824 v.AuxInt = int32ToAuxInt(off1 + off2)
8825 v.Aux = symToAux(mergeSym(sym1, sym2))
8826 v.AddArg2(x, y)
8827 return true
8828 }
8829 break
8830 }
8831
8832
8833
8834 for {
8835 off1 := auxIntToInt32(v.AuxInt)
8836 sym1 := auxToSym(v.Aux)
8837 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8838 x := v_0
8839 if v_1.Op != OpAMD64LEAQ1 {
8840 continue
8841 }
8842 off2 := auxIntToInt32(v_1.AuxInt)
8843 sym2 := auxToSym(v_1.Aux)
8844 _ = v_1.Args[1]
8845 v_1_0 := v_1.Args[0]
8846 v_1_1 := v_1.Args[1]
8847 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
8848 if x != v_1_0 {
8849 continue
8850 }
8851 y := v_1_1
8852 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8853 continue
8854 }
8855 v.reset(OpAMD64LEAQ2)
8856 v.AuxInt = int32ToAuxInt(off1 + off2)
8857 v.Aux = symToAux(mergeSym(sym1, sym2))
8858 v.AddArg2(y, x)
8859 return true
8860 }
8861 }
8862 break
8863 }
8864
8865
8866
8867 for {
8868 if auxIntToInt32(v.AuxInt) != 0 {
8869 break
8870 }
8871 x := v_0
8872 y := v_1
8873 if !(v.Aux == nil) {
8874 break
8875 }
8876 v.reset(OpAMD64ADDQ)
8877 v.AddArg2(x, y)
8878 return true
8879 }
8880 return false
8881 }
8882 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
8883 v_1 := v.Args[1]
8884 v_0 := v.Args[0]
8885
8886
8887
8888 for {
8889 c := auxIntToInt32(v.AuxInt)
8890 s := auxToSym(v.Aux)
8891 if v_0.Op != OpAMD64ADDQconst {
8892 break
8893 }
8894 d := auxIntToInt32(v_0.AuxInt)
8895 x := v_0.Args[0]
8896 y := v_1
8897 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8898 break
8899 }
8900 v.reset(OpAMD64LEAQ2)
8901 v.AuxInt = int32ToAuxInt(c + d)
8902 v.Aux = symToAux(s)
8903 v.AddArg2(x, y)
8904 return true
8905 }
8906
8907
8908
8909 for {
8910 c := auxIntToInt32(v.AuxInt)
8911 s := auxToSym(v.Aux)
8912 x := v_0
8913 if v_1.Op != OpAMD64ADDQconst {
8914 break
8915 }
8916 d := auxIntToInt32(v_1.AuxInt)
8917 y := v_1.Args[0]
8918 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8919 break
8920 }
8921 v.reset(OpAMD64LEAQ2)
8922 v.AuxInt = int32ToAuxInt(c + 2*d)
8923 v.Aux = symToAux(s)
8924 v.AddArg2(x, y)
8925 return true
8926 }
8927
8928
8929 for {
8930 c := auxIntToInt32(v.AuxInt)
8931 s := auxToSym(v.Aux)
8932 x := v_0
8933 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
8934 break
8935 }
8936 y := v_1.Args[0]
8937 v.reset(OpAMD64LEAQ4)
8938 v.AuxInt = int32ToAuxInt(c)
8939 v.Aux = symToAux(s)
8940 v.AddArg2(x, y)
8941 return true
8942 }
8943
8944
8945 for {
8946 c := auxIntToInt32(v.AuxInt)
8947 s := auxToSym(v.Aux)
8948 x := v_0
8949 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
8950 break
8951 }
8952 y := v_1.Args[0]
8953 v.reset(OpAMD64LEAQ8)
8954 v.AuxInt = int32ToAuxInt(c)
8955 v.Aux = symToAux(s)
8956 v.AddArg2(x, y)
8957 return true
8958 }
8959
8960
8961
8962 for {
8963 off1 := auxIntToInt32(v.AuxInt)
8964 sym1 := auxToSym(v.Aux)
8965 if v_0.Op != OpAMD64LEAQ {
8966 break
8967 }
8968 off2 := auxIntToInt32(v_0.AuxInt)
8969 sym2 := auxToSym(v_0.Aux)
8970 x := v_0.Args[0]
8971 y := v_1
8972 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
8973 break
8974 }
8975 v.reset(OpAMD64LEAQ2)
8976 v.AuxInt = int32ToAuxInt(off1 + off2)
8977 v.Aux = symToAux(mergeSym(sym1, sym2))
8978 v.AddArg2(x, y)
8979 return true
8980 }
8981
8982
8983
8984 for {
8985 off1 := auxIntToInt32(v.AuxInt)
8986 sym1 := auxToSym(v.Aux)
8987 x := v_0
8988 if v_1.Op != OpAMD64LEAQ1 {
8989 break
8990 }
8991 off2 := auxIntToInt32(v_1.AuxInt)
8992 sym2 := auxToSym(v_1.Aux)
8993 y := v_1.Args[1]
8994 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
8995 break
8996 }
8997 v.reset(OpAMD64LEAQ4)
8998 v.AuxInt = int32ToAuxInt(off1 + 2*off2)
8999 v.Aux = symToAux(sym1)
9000 v.AddArg2(x, y)
9001 return true
9002 }
9003
9004
9005
9006 for {
9007 off := auxIntToInt32(v.AuxInt)
9008 sym := auxToSym(v.Aux)
9009 x := v_0
9010 if v_1.Op != OpAMD64MOVQconst {
9011 break
9012 }
9013 scale := auxIntToInt64(v_1.AuxInt)
9014 if !(is32Bit(int64(off) + int64(scale)*2)) {
9015 break
9016 }
9017 v.reset(OpAMD64LEAQ)
9018 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9019 v.Aux = symToAux(sym)
9020 v.AddArg(x)
9021 return true
9022 }
9023
9024
9025
9026 for {
9027 off := auxIntToInt32(v.AuxInt)
9028 sym := auxToSym(v.Aux)
9029 x := v_0
9030 if v_1.Op != OpAMD64MOVLconst {
9031 break
9032 }
9033 scale := auxIntToInt32(v_1.AuxInt)
9034 if !(is32Bit(int64(off) + int64(scale)*2)) {
9035 break
9036 }
9037 v.reset(OpAMD64LEAQ)
9038 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9039 v.Aux = symToAux(sym)
9040 v.AddArg(x)
9041 return true
9042 }
9043 return false
9044 }
9045 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9046 v_1 := v.Args[1]
9047 v_0 := v.Args[0]
9048
9049
9050
9051 for {
9052 c := auxIntToInt32(v.AuxInt)
9053 s := auxToSym(v.Aux)
9054 if v_0.Op != OpAMD64ADDQconst {
9055 break
9056 }
9057 d := auxIntToInt32(v_0.AuxInt)
9058 x := v_0.Args[0]
9059 y := v_1
9060 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9061 break
9062 }
9063 v.reset(OpAMD64LEAQ4)
9064 v.AuxInt = int32ToAuxInt(c + d)
9065 v.Aux = symToAux(s)
9066 v.AddArg2(x, y)
9067 return true
9068 }
9069
9070
9071
9072 for {
9073 c := auxIntToInt32(v.AuxInt)
9074 s := auxToSym(v.Aux)
9075 x := v_0
9076 if v_1.Op != OpAMD64ADDQconst {
9077 break
9078 }
9079 d := auxIntToInt32(v_1.AuxInt)
9080 y := v_1.Args[0]
9081 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9082 break
9083 }
9084 v.reset(OpAMD64LEAQ4)
9085 v.AuxInt = int32ToAuxInt(c + 4*d)
9086 v.Aux = symToAux(s)
9087 v.AddArg2(x, y)
9088 return true
9089 }
9090
9091
9092 for {
9093 c := auxIntToInt32(v.AuxInt)
9094 s := auxToSym(v.Aux)
9095 x := v_0
9096 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9097 break
9098 }
9099 y := v_1.Args[0]
9100 v.reset(OpAMD64LEAQ8)
9101 v.AuxInt = int32ToAuxInt(c)
9102 v.Aux = symToAux(s)
9103 v.AddArg2(x, y)
9104 return true
9105 }
9106
9107
9108
9109 for {
9110 off1 := auxIntToInt32(v.AuxInt)
9111 sym1 := auxToSym(v.Aux)
9112 if v_0.Op != OpAMD64LEAQ {
9113 break
9114 }
9115 off2 := auxIntToInt32(v_0.AuxInt)
9116 sym2 := auxToSym(v_0.Aux)
9117 x := v_0.Args[0]
9118 y := v_1
9119 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9120 break
9121 }
9122 v.reset(OpAMD64LEAQ4)
9123 v.AuxInt = int32ToAuxInt(off1 + off2)
9124 v.Aux = symToAux(mergeSym(sym1, sym2))
9125 v.AddArg2(x, y)
9126 return true
9127 }
9128
9129
9130
9131 for {
9132 off1 := auxIntToInt32(v.AuxInt)
9133 sym1 := auxToSym(v.Aux)
9134 x := v_0
9135 if v_1.Op != OpAMD64LEAQ1 {
9136 break
9137 }
9138 off2 := auxIntToInt32(v_1.AuxInt)
9139 sym2 := auxToSym(v_1.Aux)
9140 y := v_1.Args[1]
9141 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
9142 break
9143 }
9144 v.reset(OpAMD64LEAQ8)
9145 v.AuxInt = int32ToAuxInt(off1 + 4*off2)
9146 v.Aux = symToAux(sym1)
9147 v.AddArg2(x, y)
9148 return true
9149 }
9150
9151
9152
9153 for {
9154 off := auxIntToInt32(v.AuxInt)
9155 sym := auxToSym(v.Aux)
9156 x := v_0
9157 if v_1.Op != OpAMD64MOVQconst {
9158 break
9159 }
9160 scale := auxIntToInt64(v_1.AuxInt)
9161 if !(is32Bit(int64(off) + int64(scale)*4)) {
9162 break
9163 }
9164 v.reset(OpAMD64LEAQ)
9165 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9166 v.Aux = symToAux(sym)
9167 v.AddArg(x)
9168 return true
9169 }
9170
9171
9172
9173 for {
9174 off := auxIntToInt32(v.AuxInt)
9175 sym := auxToSym(v.Aux)
9176 x := v_0
9177 if v_1.Op != OpAMD64MOVLconst {
9178 break
9179 }
9180 scale := auxIntToInt32(v_1.AuxInt)
9181 if !(is32Bit(int64(off) + int64(scale)*4)) {
9182 break
9183 }
9184 v.reset(OpAMD64LEAQ)
9185 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9186 v.Aux = symToAux(sym)
9187 v.AddArg(x)
9188 return true
9189 }
9190 return false
9191 }
9192 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9193 v_1 := v.Args[1]
9194 v_0 := v.Args[0]
9195
9196
9197
9198 for {
9199 c := auxIntToInt32(v.AuxInt)
9200 s := auxToSym(v.Aux)
9201 if v_0.Op != OpAMD64ADDQconst {
9202 break
9203 }
9204 d := auxIntToInt32(v_0.AuxInt)
9205 x := v_0.Args[0]
9206 y := v_1
9207 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9208 break
9209 }
9210 v.reset(OpAMD64LEAQ8)
9211 v.AuxInt = int32ToAuxInt(c + d)
9212 v.Aux = symToAux(s)
9213 v.AddArg2(x, y)
9214 return true
9215 }
9216
9217
9218
9219 for {
9220 c := auxIntToInt32(v.AuxInt)
9221 s := auxToSym(v.Aux)
9222 x := v_0
9223 if v_1.Op != OpAMD64ADDQconst {
9224 break
9225 }
9226 d := auxIntToInt32(v_1.AuxInt)
9227 y := v_1.Args[0]
9228 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9229 break
9230 }
9231 v.reset(OpAMD64LEAQ8)
9232 v.AuxInt = int32ToAuxInt(c + 8*d)
9233 v.Aux = symToAux(s)
9234 v.AddArg2(x, y)
9235 return true
9236 }
9237
9238
9239
9240 for {
9241 off1 := auxIntToInt32(v.AuxInt)
9242 sym1 := auxToSym(v.Aux)
9243 if v_0.Op != OpAMD64LEAQ {
9244 break
9245 }
9246 off2 := auxIntToInt32(v_0.AuxInt)
9247 sym2 := auxToSym(v_0.Aux)
9248 x := v_0.Args[0]
9249 y := v_1
9250 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9251 break
9252 }
9253 v.reset(OpAMD64LEAQ8)
9254 v.AuxInt = int32ToAuxInt(off1 + off2)
9255 v.Aux = symToAux(mergeSym(sym1, sym2))
9256 v.AddArg2(x, y)
9257 return true
9258 }
9259
9260
9261
9262 for {
9263 off := auxIntToInt32(v.AuxInt)
9264 sym := auxToSym(v.Aux)
9265 x := v_0
9266 if v_1.Op != OpAMD64MOVQconst {
9267 break
9268 }
9269 scale := auxIntToInt64(v_1.AuxInt)
9270 if !(is32Bit(int64(off) + int64(scale)*8)) {
9271 break
9272 }
9273 v.reset(OpAMD64LEAQ)
9274 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9275 v.Aux = symToAux(sym)
9276 v.AddArg(x)
9277 return true
9278 }
9279
9280
9281
9282 for {
9283 off := auxIntToInt32(v.AuxInt)
9284 sym := auxToSym(v.Aux)
9285 x := v_0
9286 if v_1.Op != OpAMD64MOVLconst {
9287 break
9288 }
9289 scale := auxIntToInt32(v_1.AuxInt)
9290 if !(is32Bit(int64(off) + int64(scale)*8)) {
9291 break
9292 }
9293 v.reset(OpAMD64LEAQ)
9294 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9295 v.Aux = symToAux(sym)
9296 v.AddArg(x)
9297 return true
9298 }
9299 return false
9300 }
9301 func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
9302 v_2 := v.Args[2]
9303 v_1 := v.Args[1]
9304 v_0 := v.Args[0]
9305
9306
9307
9308 for {
9309 i := auxIntToInt32(v.AuxInt)
9310 s := auxToSym(v.Aux)
9311 p := v_0
9312 x := v_1
9313 if x.Op != OpAMD64BSWAPL {
9314 break
9315 }
9316 w := x.Args[0]
9317 mem := v_2
9318 if !(x.Uses == 1) {
9319 break
9320 }
9321 v.reset(OpAMD64MOVLstore)
9322 v.AuxInt = int32ToAuxInt(i)
9323 v.Aux = symToAux(s)
9324 v.AddArg3(p, w, mem)
9325 return true
9326 }
9327 return false
9328 }
9329 func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
9330 v_2 := v.Args[2]
9331 v_1 := v.Args[1]
9332 v_0 := v.Args[0]
9333
9334
9335
9336 for {
9337 i := auxIntToInt32(v.AuxInt)
9338 s := auxToSym(v.Aux)
9339 p := v_0
9340 x := v_1
9341 if x.Op != OpAMD64BSWAPQ {
9342 break
9343 }
9344 w := x.Args[0]
9345 mem := v_2
9346 if !(x.Uses == 1) {
9347 break
9348 }
9349 v.reset(OpAMD64MOVQstore)
9350 v.AuxInt = int32ToAuxInt(i)
9351 v.Aux = symToAux(s)
9352 v.AddArg3(p, w, mem)
9353 return true
9354 }
9355 return false
9356 }
9357 func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
9358 v_2 := v.Args[2]
9359 v_1 := v.Args[1]
9360 v_0 := v.Args[0]
9361
9362
9363
9364 for {
9365 i := auxIntToInt32(v.AuxInt)
9366 s := auxToSym(v.Aux)
9367 p := v_0
9368 x := v_1
9369 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
9370 break
9371 }
9372 w := x.Args[0]
9373 mem := v_2
9374 if !(x.Uses == 1) {
9375 break
9376 }
9377 v.reset(OpAMD64MOVWstore)
9378 v.AuxInt = int32ToAuxInt(i)
9379 v.Aux = symToAux(s)
9380 v.AddArg3(p, w, mem)
9381 return true
9382 }
9383 return false
9384 }
9385 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9386 v_0 := v.Args[0]
9387 b := v.Block
9388
9389
9390
9391 for {
9392 x := v_0
9393 if x.Op != OpAMD64MOVBload {
9394 break
9395 }
9396 off := auxIntToInt32(x.AuxInt)
9397 sym := auxToSym(x.Aux)
9398 mem := x.Args[1]
9399 ptr := x.Args[0]
9400 if !(x.Uses == 1 && clobber(x)) {
9401 break
9402 }
9403 b = x.Block
9404 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9405 v.copyOf(v0)
9406 v0.AuxInt = int32ToAuxInt(off)
9407 v0.Aux = symToAux(sym)
9408 v0.AddArg2(ptr, mem)
9409 return true
9410 }
9411
9412
9413
9414 for {
9415 x := v_0
9416 if x.Op != OpAMD64MOVWload {
9417 break
9418 }
9419 off := auxIntToInt32(x.AuxInt)
9420 sym := auxToSym(x.Aux)
9421 mem := x.Args[1]
9422 ptr := x.Args[0]
9423 if !(x.Uses == 1 && clobber(x)) {
9424 break
9425 }
9426 b = x.Block
9427 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9428 v.copyOf(v0)
9429 v0.AuxInt = int32ToAuxInt(off)
9430 v0.Aux = symToAux(sym)
9431 v0.AddArg2(ptr, mem)
9432 return true
9433 }
9434
9435
9436
9437 for {
9438 x := v_0
9439 if x.Op != OpAMD64MOVLload {
9440 break
9441 }
9442 off := auxIntToInt32(x.AuxInt)
9443 sym := auxToSym(x.Aux)
9444 mem := x.Args[1]
9445 ptr := x.Args[0]
9446 if !(x.Uses == 1 && clobber(x)) {
9447 break
9448 }
9449 b = x.Block
9450 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9451 v.copyOf(v0)
9452 v0.AuxInt = int32ToAuxInt(off)
9453 v0.Aux = symToAux(sym)
9454 v0.AddArg2(ptr, mem)
9455 return true
9456 }
9457
9458
9459
9460 for {
9461 x := v_0
9462 if x.Op != OpAMD64MOVQload {
9463 break
9464 }
9465 off := auxIntToInt32(x.AuxInt)
9466 sym := auxToSym(x.Aux)
9467 mem := x.Args[1]
9468 ptr := x.Args[0]
9469 if !(x.Uses == 1 && clobber(x)) {
9470 break
9471 }
9472 b = x.Block
9473 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9474 v.copyOf(v0)
9475 v0.AuxInt = int32ToAuxInt(off)
9476 v0.Aux = symToAux(sym)
9477 v0.AddArg2(ptr, mem)
9478 return true
9479 }
9480
9481
9482
9483 for {
9484 if v_0.Op != OpAMD64ANDLconst {
9485 break
9486 }
9487 c := auxIntToInt32(v_0.AuxInt)
9488 x := v_0.Args[0]
9489 if !(c&0x80 == 0) {
9490 break
9491 }
9492 v.reset(OpAMD64ANDLconst)
9493 v.AuxInt = int32ToAuxInt(c & 0x7f)
9494 v.AddArg(x)
9495 return true
9496 }
9497
9498
9499 for {
9500 if v_0.Op != OpAMD64MOVBQSX {
9501 break
9502 }
9503 x := v_0.Args[0]
9504 v.reset(OpAMD64MOVBQSX)
9505 v.AddArg(x)
9506 return true
9507 }
9508 return false
9509 }
9510 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
9511 v_1 := v.Args[1]
9512 v_0 := v.Args[0]
9513
9514
9515
9516 for {
9517 off := auxIntToInt32(v.AuxInt)
9518 sym := auxToSym(v.Aux)
9519 ptr := v_0
9520 if v_1.Op != OpAMD64MOVBstore {
9521 break
9522 }
9523 off2 := auxIntToInt32(v_1.AuxInt)
9524 sym2 := auxToSym(v_1.Aux)
9525 x := v_1.Args[1]
9526 ptr2 := v_1.Args[0]
9527 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9528 break
9529 }
9530 v.reset(OpAMD64MOVBQSX)
9531 v.AddArg(x)
9532 return true
9533 }
9534
9535
9536
9537 for {
9538 off1 := auxIntToInt32(v.AuxInt)
9539 sym1 := auxToSym(v.Aux)
9540 if v_0.Op != OpAMD64LEAQ {
9541 break
9542 }
9543 off2 := auxIntToInt32(v_0.AuxInt)
9544 sym2 := auxToSym(v_0.Aux)
9545 base := v_0.Args[0]
9546 mem := v_1
9547 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9548 break
9549 }
9550 v.reset(OpAMD64MOVBQSXload)
9551 v.AuxInt = int32ToAuxInt(off1 + off2)
9552 v.Aux = symToAux(mergeSym(sym1, sym2))
9553 v.AddArg2(base, mem)
9554 return true
9555 }
9556 return false
9557 }
9558 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
9559 v_0 := v.Args[0]
9560 b := v.Block
9561
9562
9563
9564 for {
9565 x := v_0
9566 if x.Op != OpAMD64MOVBload {
9567 break
9568 }
9569 off := auxIntToInt32(x.AuxInt)
9570 sym := auxToSym(x.Aux)
9571 mem := x.Args[1]
9572 ptr := x.Args[0]
9573 if !(x.Uses == 1 && clobber(x)) {
9574 break
9575 }
9576 b = x.Block
9577 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9578 v.copyOf(v0)
9579 v0.AuxInt = int32ToAuxInt(off)
9580 v0.Aux = symToAux(sym)
9581 v0.AddArg2(ptr, mem)
9582 return true
9583 }
9584
9585
9586
9587 for {
9588 x := v_0
9589 if x.Op != OpAMD64MOVWload {
9590 break
9591 }
9592 off := auxIntToInt32(x.AuxInt)
9593 sym := auxToSym(x.Aux)
9594 mem := x.Args[1]
9595 ptr := x.Args[0]
9596 if !(x.Uses == 1 && clobber(x)) {
9597 break
9598 }
9599 b = x.Block
9600 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9601 v.copyOf(v0)
9602 v0.AuxInt = int32ToAuxInt(off)
9603 v0.Aux = symToAux(sym)
9604 v0.AddArg2(ptr, mem)
9605 return true
9606 }
9607
9608
9609
9610 for {
9611 x := v_0
9612 if x.Op != OpAMD64MOVLload {
9613 break
9614 }
9615 off := auxIntToInt32(x.AuxInt)
9616 sym := auxToSym(x.Aux)
9617 mem := x.Args[1]
9618 ptr := x.Args[0]
9619 if !(x.Uses == 1 && clobber(x)) {
9620 break
9621 }
9622 b = x.Block
9623 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9624 v.copyOf(v0)
9625 v0.AuxInt = int32ToAuxInt(off)
9626 v0.Aux = symToAux(sym)
9627 v0.AddArg2(ptr, mem)
9628 return true
9629 }
9630
9631
9632
9633 for {
9634 x := v_0
9635 if x.Op != OpAMD64MOVQload {
9636 break
9637 }
9638 off := auxIntToInt32(x.AuxInt)
9639 sym := auxToSym(x.Aux)
9640 mem := x.Args[1]
9641 ptr := x.Args[0]
9642 if !(x.Uses == 1 && clobber(x)) {
9643 break
9644 }
9645 b = x.Block
9646 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9647 v.copyOf(v0)
9648 v0.AuxInt = int32ToAuxInt(off)
9649 v0.Aux = symToAux(sym)
9650 v0.AddArg2(ptr, mem)
9651 return true
9652 }
9653
9654
9655 for {
9656 if v_0.Op != OpAMD64ANDLconst {
9657 break
9658 }
9659 c := auxIntToInt32(v_0.AuxInt)
9660 x := v_0.Args[0]
9661 v.reset(OpAMD64ANDLconst)
9662 v.AuxInt = int32ToAuxInt(c & 0xff)
9663 v.AddArg(x)
9664 return true
9665 }
9666
9667
9668 for {
9669 if v_0.Op != OpAMD64MOVBQZX {
9670 break
9671 }
9672 x := v_0.Args[0]
9673 v.reset(OpAMD64MOVBQZX)
9674 v.AddArg(x)
9675 return true
9676 }
9677 return false
9678 }
9679 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
9680 v_1 := v.Args[1]
9681 v_0 := v.Args[0]
9682
9683
9684
9685 for {
9686 off1 := auxIntToInt32(v.AuxInt)
9687 sym := auxToSym(v.Aux)
9688 if v_0.Op != OpAMD64ADDQconst {
9689 break
9690 }
9691 off2 := auxIntToInt32(v_0.AuxInt)
9692 ptr := v_0.Args[0]
9693 mem := v_1
9694 if !(is32Bit(int64(off1) + int64(off2))) {
9695 break
9696 }
9697 v.reset(OpAMD64MOVBatomicload)
9698 v.AuxInt = int32ToAuxInt(off1 + off2)
9699 v.Aux = symToAux(sym)
9700 v.AddArg2(ptr, mem)
9701 return true
9702 }
9703
9704
9705
9706 for {
9707 off1 := auxIntToInt32(v.AuxInt)
9708 sym1 := auxToSym(v.Aux)
9709 if v_0.Op != OpAMD64LEAQ {
9710 break
9711 }
9712 off2 := auxIntToInt32(v_0.AuxInt)
9713 sym2 := auxToSym(v_0.Aux)
9714 ptr := v_0.Args[0]
9715 mem := v_1
9716 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9717 break
9718 }
9719 v.reset(OpAMD64MOVBatomicload)
9720 v.AuxInt = int32ToAuxInt(off1 + off2)
9721 v.Aux = symToAux(mergeSym(sym1, sym2))
9722 v.AddArg2(ptr, mem)
9723 return true
9724 }
9725 return false
9726 }
9727 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
9728 v_1 := v.Args[1]
9729 v_0 := v.Args[0]
9730
9731
9732
9733 for {
9734 off := auxIntToInt32(v.AuxInt)
9735 sym := auxToSym(v.Aux)
9736 ptr := v_0
9737 if v_1.Op != OpAMD64MOVBstore {
9738 break
9739 }
9740 off2 := auxIntToInt32(v_1.AuxInt)
9741 sym2 := auxToSym(v_1.Aux)
9742 x := v_1.Args[1]
9743 ptr2 := v_1.Args[0]
9744 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9745 break
9746 }
9747 v.reset(OpAMD64MOVBQZX)
9748 v.AddArg(x)
9749 return true
9750 }
9751
9752
9753
9754 for {
9755 off1 := auxIntToInt32(v.AuxInt)
9756 sym := auxToSym(v.Aux)
9757 if v_0.Op != OpAMD64ADDQconst {
9758 break
9759 }
9760 off2 := auxIntToInt32(v_0.AuxInt)
9761 ptr := v_0.Args[0]
9762 mem := v_1
9763 if !(is32Bit(int64(off1) + int64(off2))) {
9764 break
9765 }
9766 v.reset(OpAMD64MOVBload)
9767 v.AuxInt = int32ToAuxInt(off1 + off2)
9768 v.Aux = symToAux(sym)
9769 v.AddArg2(ptr, mem)
9770 return true
9771 }
9772
9773
9774
9775 for {
9776 off1 := auxIntToInt32(v.AuxInt)
9777 sym1 := auxToSym(v.Aux)
9778 if v_0.Op != OpAMD64LEAQ {
9779 break
9780 }
9781 off2 := auxIntToInt32(v_0.AuxInt)
9782 sym2 := auxToSym(v_0.Aux)
9783 base := v_0.Args[0]
9784 mem := v_1
9785 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9786 break
9787 }
9788 v.reset(OpAMD64MOVBload)
9789 v.AuxInt = int32ToAuxInt(off1 + off2)
9790 v.Aux = symToAux(mergeSym(sym1, sym2))
9791 v.AddArg2(base, mem)
9792 return true
9793 }
9794
9795
9796
9797 for {
9798 off := auxIntToInt32(v.AuxInt)
9799 sym := auxToSym(v.Aux)
9800 if v_0.Op != OpSB || !(symIsRO(sym)) {
9801 break
9802 }
9803 v.reset(OpAMD64MOVLconst)
9804 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
9805 return true
9806 }
9807 return false
9808 }
9809 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
9810 v_2 := v.Args[2]
9811 v_1 := v.Args[1]
9812 v_0 := v.Args[0]
9813
9814
9815
9816 for {
9817 off := auxIntToInt32(v.AuxInt)
9818 sym := auxToSym(v.Aux)
9819 ptr := v_0
9820 y := v_1
9821 if y.Op != OpAMD64SETL {
9822 break
9823 }
9824 x := y.Args[0]
9825 mem := v_2
9826 if !(y.Uses == 1) {
9827 break
9828 }
9829 v.reset(OpAMD64SETLstore)
9830 v.AuxInt = int32ToAuxInt(off)
9831 v.Aux = symToAux(sym)
9832 v.AddArg3(ptr, x, mem)
9833 return true
9834 }
9835
9836
9837
9838 for {
9839 off := auxIntToInt32(v.AuxInt)
9840 sym := auxToSym(v.Aux)
9841 ptr := v_0
9842 y := v_1
9843 if y.Op != OpAMD64SETLE {
9844 break
9845 }
9846 x := y.Args[0]
9847 mem := v_2
9848 if !(y.Uses == 1) {
9849 break
9850 }
9851 v.reset(OpAMD64SETLEstore)
9852 v.AuxInt = int32ToAuxInt(off)
9853 v.Aux = symToAux(sym)
9854 v.AddArg3(ptr, x, mem)
9855 return true
9856 }
9857
9858
9859
9860 for {
9861 off := auxIntToInt32(v.AuxInt)
9862 sym := auxToSym(v.Aux)
9863 ptr := v_0
9864 y := v_1
9865 if y.Op != OpAMD64SETG {
9866 break
9867 }
9868 x := y.Args[0]
9869 mem := v_2
9870 if !(y.Uses == 1) {
9871 break
9872 }
9873 v.reset(OpAMD64SETGstore)
9874 v.AuxInt = int32ToAuxInt(off)
9875 v.Aux = symToAux(sym)
9876 v.AddArg3(ptr, x, mem)
9877 return true
9878 }
9879
9880
9881
9882 for {
9883 off := auxIntToInt32(v.AuxInt)
9884 sym := auxToSym(v.Aux)
9885 ptr := v_0
9886 y := v_1
9887 if y.Op != OpAMD64SETGE {
9888 break
9889 }
9890 x := y.Args[0]
9891 mem := v_2
9892 if !(y.Uses == 1) {
9893 break
9894 }
9895 v.reset(OpAMD64SETGEstore)
9896 v.AuxInt = int32ToAuxInt(off)
9897 v.Aux = symToAux(sym)
9898 v.AddArg3(ptr, x, mem)
9899 return true
9900 }
9901
9902
9903
9904 for {
9905 off := auxIntToInt32(v.AuxInt)
9906 sym := auxToSym(v.Aux)
9907 ptr := v_0
9908 y := v_1
9909 if y.Op != OpAMD64SETEQ {
9910 break
9911 }
9912 x := y.Args[0]
9913 mem := v_2
9914 if !(y.Uses == 1) {
9915 break
9916 }
9917 v.reset(OpAMD64SETEQstore)
9918 v.AuxInt = int32ToAuxInt(off)
9919 v.Aux = symToAux(sym)
9920 v.AddArg3(ptr, x, mem)
9921 return true
9922 }
9923
9924
9925
9926 for {
9927 off := auxIntToInt32(v.AuxInt)
9928 sym := auxToSym(v.Aux)
9929 ptr := v_0
9930 y := v_1
9931 if y.Op != OpAMD64SETNE {
9932 break
9933 }
9934 x := y.Args[0]
9935 mem := v_2
9936 if !(y.Uses == 1) {
9937 break
9938 }
9939 v.reset(OpAMD64SETNEstore)
9940 v.AuxInt = int32ToAuxInt(off)
9941 v.Aux = symToAux(sym)
9942 v.AddArg3(ptr, x, mem)
9943 return true
9944 }
9945
9946
9947
9948 for {
9949 off := auxIntToInt32(v.AuxInt)
9950 sym := auxToSym(v.Aux)
9951 ptr := v_0
9952 y := v_1
9953 if y.Op != OpAMD64SETB {
9954 break
9955 }
9956 x := y.Args[0]
9957 mem := v_2
9958 if !(y.Uses == 1) {
9959 break
9960 }
9961 v.reset(OpAMD64SETBstore)
9962 v.AuxInt = int32ToAuxInt(off)
9963 v.Aux = symToAux(sym)
9964 v.AddArg3(ptr, x, mem)
9965 return true
9966 }
9967
9968
9969
9970 for {
9971 off := auxIntToInt32(v.AuxInt)
9972 sym := auxToSym(v.Aux)
9973 ptr := v_0
9974 y := v_1
9975 if y.Op != OpAMD64SETBE {
9976 break
9977 }
9978 x := y.Args[0]
9979 mem := v_2
9980 if !(y.Uses == 1) {
9981 break
9982 }
9983 v.reset(OpAMD64SETBEstore)
9984 v.AuxInt = int32ToAuxInt(off)
9985 v.Aux = symToAux(sym)
9986 v.AddArg3(ptr, x, mem)
9987 return true
9988 }
9989
9990
9991
9992 for {
9993 off := auxIntToInt32(v.AuxInt)
9994 sym := auxToSym(v.Aux)
9995 ptr := v_0
9996 y := v_1
9997 if y.Op != OpAMD64SETA {
9998 break
9999 }
10000 x := y.Args[0]
10001 mem := v_2
10002 if !(y.Uses == 1) {
10003 break
10004 }
10005 v.reset(OpAMD64SETAstore)
10006 v.AuxInt = int32ToAuxInt(off)
10007 v.Aux = symToAux(sym)
10008 v.AddArg3(ptr, x, mem)
10009 return true
10010 }
10011
10012
10013
10014 for {
10015 off := auxIntToInt32(v.AuxInt)
10016 sym := auxToSym(v.Aux)
10017 ptr := v_0
10018 y := v_1
10019 if y.Op != OpAMD64SETAE {
10020 break
10021 }
10022 x := y.Args[0]
10023 mem := v_2
10024 if !(y.Uses == 1) {
10025 break
10026 }
10027 v.reset(OpAMD64SETAEstore)
10028 v.AuxInt = int32ToAuxInt(off)
10029 v.Aux = symToAux(sym)
10030 v.AddArg3(ptr, x, mem)
10031 return true
10032 }
10033
10034
10035 for {
10036 off := auxIntToInt32(v.AuxInt)
10037 sym := auxToSym(v.Aux)
10038 ptr := v_0
10039 if v_1.Op != OpAMD64MOVBQSX {
10040 break
10041 }
10042 x := v_1.Args[0]
10043 mem := v_2
10044 v.reset(OpAMD64MOVBstore)
10045 v.AuxInt = int32ToAuxInt(off)
10046 v.Aux = symToAux(sym)
10047 v.AddArg3(ptr, x, mem)
10048 return true
10049 }
10050
10051
10052 for {
10053 off := auxIntToInt32(v.AuxInt)
10054 sym := auxToSym(v.Aux)
10055 ptr := v_0
10056 if v_1.Op != OpAMD64MOVBQZX {
10057 break
10058 }
10059 x := v_1.Args[0]
10060 mem := v_2
10061 v.reset(OpAMD64MOVBstore)
10062 v.AuxInt = int32ToAuxInt(off)
10063 v.Aux = symToAux(sym)
10064 v.AddArg3(ptr, x, mem)
10065 return true
10066 }
10067
10068
10069
10070 for {
10071 off1 := auxIntToInt32(v.AuxInt)
10072 sym := auxToSym(v.Aux)
10073 if v_0.Op != OpAMD64ADDQconst {
10074 break
10075 }
10076 off2 := auxIntToInt32(v_0.AuxInt)
10077 ptr := v_0.Args[0]
10078 val := v_1
10079 mem := v_2
10080 if !(is32Bit(int64(off1) + int64(off2))) {
10081 break
10082 }
10083 v.reset(OpAMD64MOVBstore)
10084 v.AuxInt = int32ToAuxInt(off1 + off2)
10085 v.Aux = symToAux(sym)
10086 v.AddArg3(ptr, val, mem)
10087 return true
10088 }
10089
10090
10091 for {
10092 off := auxIntToInt32(v.AuxInt)
10093 sym := auxToSym(v.Aux)
10094 ptr := v_0
10095 if v_1.Op != OpAMD64MOVLconst {
10096 break
10097 }
10098 c := auxIntToInt32(v_1.AuxInt)
10099 mem := v_2
10100 v.reset(OpAMD64MOVBstoreconst)
10101 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10102 v.Aux = symToAux(sym)
10103 v.AddArg2(ptr, mem)
10104 return true
10105 }
10106
10107
10108 for {
10109 off := auxIntToInt32(v.AuxInt)
10110 sym := auxToSym(v.Aux)
10111 ptr := v_0
10112 if v_1.Op != OpAMD64MOVQconst {
10113 break
10114 }
10115 c := auxIntToInt64(v_1.AuxInt)
10116 mem := v_2
10117 v.reset(OpAMD64MOVBstoreconst)
10118 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10119 v.Aux = symToAux(sym)
10120 v.AddArg2(ptr, mem)
10121 return true
10122 }
10123
10124
10125
10126 for {
10127 off1 := auxIntToInt32(v.AuxInt)
10128 sym1 := auxToSym(v.Aux)
10129 if v_0.Op != OpAMD64LEAQ {
10130 break
10131 }
10132 off2 := auxIntToInt32(v_0.AuxInt)
10133 sym2 := auxToSym(v_0.Aux)
10134 base := v_0.Args[0]
10135 val := v_1
10136 mem := v_2
10137 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10138 break
10139 }
10140 v.reset(OpAMD64MOVBstore)
10141 v.AuxInt = int32ToAuxInt(off1 + off2)
10142 v.Aux = symToAux(mergeSym(sym1, sym2))
10143 v.AddArg3(base, val, mem)
10144 return true
10145 }
10146 return false
10147 }
10148 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
10149 v_1 := v.Args[1]
10150 v_0 := v.Args[0]
10151
10152
10153
10154 for {
10155 sc := auxIntToValAndOff(v.AuxInt)
10156 s := auxToSym(v.Aux)
10157 if v_0.Op != OpAMD64ADDQconst {
10158 break
10159 }
10160 off := auxIntToInt32(v_0.AuxInt)
10161 ptr := v_0.Args[0]
10162 mem := v_1
10163 if !(ValAndOff(sc).canAdd32(off)) {
10164 break
10165 }
10166 v.reset(OpAMD64MOVBstoreconst)
10167 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
10168 v.Aux = symToAux(s)
10169 v.AddArg2(ptr, mem)
10170 return true
10171 }
10172
10173
10174
10175 for {
10176 sc := auxIntToValAndOff(v.AuxInt)
10177 sym1 := auxToSym(v.Aux)
10178 if v_0.Op != OpAMD64LEAQ {
10179 break
10180 }
10181 off := auxIntToInt32(v_0.AuxInt)
10182 sym2 := auxToSym(v_0.Aux)
10183 ptr := v_0.Args[0]
10184 mem := v_1
10185 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
10186 break
10187 }
10188 v.reset(OpAMD64MOVBstoreconst)
10189 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
10190 v.Aux = symToAux(mergeSym(sym1, sym2))
10191 v.AddArg2(ptr, mem)
10192 return true
10193 }
10194 return false
10195 }
10196 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
10197 v_0 := v.Args[0]
10198 b := v.Block
10199
10200
10201
10202 for {
10203 x := v_0
10204 if x.Op != OpAMD64MOVLload {
10205 break
10206 }
10207 off := auxIntToInt32(x.AuxInt)
10208 sym := auxToSym(x.Aux)
10209 mem := x.Args[1]
10210 ptr := x.Args[0]
10211 if !(x.Uses == 1 && clobber(x)) {
10212 break
10213 }
10214 b = x.Block
10215 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
10216 v.copyOf(v0)
10217 v0.AuxInt = int32ToAuxInt(off)
10218 v0.Aux = symToAux(sym)
10219 v0.AddArg2(ptr, mem)
10220 return true
10221 }
10222
10223
10224
10225 for {
10226 x := v_0
10227 if x.Op != OpAMD64MOVQload {
10228 break
10229 }
10230 off := auxIntToInt32(x.AuxInt)
10231 sym := auxToSym(x.Aux)
10232 mem := x.Args[1]
10233 ptr := x.Args[0]
10234 if !(x.Uses == 1 && clobber(x)) {
10235 break
10236 }
10237 b = x.Block
10238 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
10239 v.copyOf(v0)
10240 v0.AuxInt = int32ToAuxInt(off)
10241 v0.Aux = symToAux(sym)
10242 v0.AddArg2(ptr, mem)
10243 return true
10244 }
10245
10246
10247
10248 for {
10249 if v_0.Op != OpAMD64ANDLconst {
10250 break
10251 }
10252 c := auxIntToInt32(v_0.AuxInt)
10253 x := v_0.Args[0]
10254 if !(uint32(c)&0x80000000 == 0) {
10255 break
10256 }
10257 v.reset(OpAMD64ANDLconst)
10258 v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
10259 v.AddArg(x)
10260 return true
10261 }
10262
10263
10264 for {
10265 if v_0.Op != OpAMD64MOVLQSX {
10266 break
10267 }
10268 x := v_0.Args[0]
10269 v.reset(OpAMD64MOVLQSX)
10270 v.AddArg(x)
10271 return true
10272 }
10273
10274
10275 for {
10276 if v_0.Op != OpAMD64MOVWQSX {
10277 break
10278 }
10279 x := v_0.Args[0]
10280 v.reset(OpAMD64MOVWQSX)
10281 v.AddArg(x)
10282 return true
10283 }
10284
10285
10286 for {
10287 if v_0.Op != OpAMD64MOVBQSX {
10288 break
10289 }
10290 x := v_0.Args[0]
10291 v.reset(OpAMD64MOVBQSX)
10292 v.AddArg(x)
10293 return true
10294 }
10295 return false
10296 }
10297 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
10298 v_1 := v.Args[1]
10299 v_0 := v.Args[0]
10300
10301
10302
10303 for {
10304 off := auxIntToInt32(v.AuxInt)
10305 sym := auxToSym(v.Aux)
10306 ptr := v_0
10307 if v_1.Op != OpAMD64MOVLstore {
10308 break
10309 }
10310 off2 := auxIntToInt32(v_1.AuxInt)
10311 sym2 := auxToSym(v_1.Aux)
10312 x := v_1.Args[1]
10313 ptr2 := v_1.Args[0]
10314 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10315 break
10316 }
10317 v.reset(OpAMD64MOVLQSX)
10318 v.AddArg(x)
10319 return true
10320 }
10321
10322
10323
10324 for {
10325 off1 := auxIntToInt32(v.AuxInt)
10326 sym1 := auxToSym(v.Aux)
10327 if v_0.Op != OpAMD64LEAQ {
10328 break
10329 }
10330 off2 := auxIntToInt32(v_0.AuxInt)
10331 sym2 := auxToSym(v_0.Aux)
10332 base := v_0.Args[0]
10333 mem := v_1
10334 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10335 break
10336 }
10337 v.reset(OpAMD64MOVLQSXload)
10338 v.AuxInt = int32ToAuxInt(off1 + off2)
10339 v.Aux = symToAux(mergeSym(sym1, sym2))
10340 v.AddArg2(base, mem)
10341 return true
10342 }
10343 return false
10344 }
10345 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
10346 v_0 := v.Args[0]
10347 b := v.Block
10348
10349
10350
10351 for {
10352 x := v_0
10353 if x.Op != OpAMD64MOVLload {
10354 break
10355 }
10356 off := auxIntToInt32(x.AuxInt)
10357 sym := auxToSym(x.Aux)
10358 mem := x.Args[1]
10359 ptr := x.Args[0]
10360 if !(x.Uses == 1 && clobber(x)) {
10361 break
10362 }
10363 b = x.Block
10364 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
10365 v.copyOf(v0)
10366 v0.AuxInt = int32ToAuxInt(off)
10367 v0.Aux = symToAux(sym)
10368 v0.AddArg2(ptr, mem)
10369 return true
10370 }
10371
10372
10373
10374 for {
10375 x := v_0
10376 if x.Op != OpAMD64MOVQload {
10377 break
10378 }
10379 off := auxIntToInt32(x.AuxInt)
10380 sym := auxToSym(x.Aux)
10381 mem := x.Args[1]
10382 ptr := x.Args[0]
10383 if !(x.Uses == 1 && clobber(x)) {
10384 break
10385 }
10386 b = x.Block
10387 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
10388 v.copyOf(v0)
10389 v0.AuxInt = int32ToAuxInt(off)
10390 v0.Aux = symToAux(sym)
10391 v0.AddArg2(ptr, mem)
10392 return true
10393 }
10394
10395
10396 for {
10397 if v_0.Op != OpAMD64ANDLconst {
10398 break
10399 }
10400 c := auxIntToInt32(v_0.AuxInt)
10401 x := v_0.Args[0]
10402 v.reset(OpAMD64ANDLconst)
10403 v.AuxInt = int32ToAuxInt(c)
10404 v.AddArg(x)
10405 return true
10406 }
10407
10408
10409 for {
10410 if v_0.Op != OpAMD64MOVLQZX {
10411 break
10412 }
10413 x := v_0.Args[0]
10414 v.reset(OpAMD64MOVLQZX)
10415 v.AddArg(x)
10416 return true
10417 }
10418
10419
10420 for {
10421 if v_0.Op != OpAMD64MOVWQZX {
10422 break
10423 }
10424 x := v_0.Args[0]
10425 v.reset(OpAMD64MOVWQZX)
10426 v.AddArg(x)
10427 return true
10428 }
10429
10430
10431 for {
10432 if v_0.Op != OpAMD64MOVBQZX {
10433 break
10434 }
10435 x := v_0.Args[0]
10436 v.reset(OpAMD64MOVBQZX)
10437 v.AddArg(x)
10438 return true
10439 }
10440 return false
10441 }
10442 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
10443 v_1 := v.Args[1]
10444 v_0 := v.Args[0]
10445
10446
10447
10448 for {
10449 off1 := auxIntToInt32(v.AuxInt)
10450 sym := auxToSym(v.Aux)
10451 if v_0.Op != OpAMD64ADDQconst {
10452 break
10453 }
10454 off2 := auxIntToInt32(v_0.AuxInt)
10455 ptr := v_0.Args[0]
10456 mem := v_1
10457 if !(is32Bit(int64(off1) + int64(off2))) {
10458 break
10459 }
10460 v.reset(OpAMD64MOVLatomicload)
10461 v.AuxInt = int32ToAuxInt(off1 + off2)
10462 v.Aux = symToAux(sym)
10463 v.AddArg2(ptr, mem)
10464 return true
10465 }
10466
10467
10468
10469 for {
10470 off1 := auxIntToInt32(v.AuxInt)
10471 sym1 := auxToSym(v.Aux)
10472 if v_0.Op != OpAMD64LEAQ {
10473 break
10474 }
10475 off2 := auxIntToInt32(v_0.AuxInt)
10476 sym2 := auxToSym(v_0.Aux)
10477 ptr := v_0.Args[0]
10478 mem := v_1
10479 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10480 break
10481 }
10482 v.reset(OpAMD64MOVLatomicload)
10483 v.AuxInt = int32ToAuxInt(off1 + off2)
10484 v.Aux = symToAux(mergeSym(sym1, sym2))
10485 v.AddArg2(ptr, mem)
10486 return true
10487 }
10488 return false
10489 }
10490 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
10491 v_0 := v.Args[0]
10492 b := v.Block
10493
10494
10495
10496 for {
10497 t := v.Type
10498 if v_0.Op != OpArg {
10499 break
10500 }
10501 u := v_0.Type
10502 off := auxIntToInt32(v_0.AuxInt)
10503 sym := auxToSym(v_0.Aux)
10504 if !(t.Size() == u.Size()) {
10505 break
10506 }
10507 b = b.Func.Entry
10508 v0 := b.NewValue0(v.Pos, OpArg, t)
10509 v.copyOf(v0)
10510 v0.AuxInt = int32ToAuxInt(off)
10511 v0.Aux = symToAux(sym)
10512 return true
10513 }
10514 return false
10515 }
10516 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
10517 v_0 := v.Args[0]
10518 b := v.Block
10519
10520
10521
10522 for {
10523 t := v.Type
10524 if v_0.Op != OpArg {
10525 break
10526 }
10527 u := v_0.Type
10528 off := auxIntToInt32(v_0.AuxInt)
10529 sym := auxToSym(v_0.Aux)
10530 if !(t.Size() == u.Size()) {
10531 break
10532 }
10533 b = b.Func.Entry
10534 v0 := b.NewValue0(v.Pos, OpArg, t)
10535 v.copyOf(v0)
10536 v0.AuxInt = int32ToAuxInt(off)
10537 v0.Aux = symToAux(sym)
10538 return true
10539 }
10540 return false
10541 }
10542 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
10543 v_1 := v.Args[1]
10544 v_0 := v.Args[0]
10545 b := v.Block
10546 config := b.Func.Config
10547
10548
10549
10550 for {
10551 off := auxIntToInt32(v.AuxInt)
10552 sym := auxToSym(v.Aux)
10553 ptr := v_0
10554 if v_1.Op != OpAMD64MOVLstore {
10555 break
10556 }
10557 off2 := auxIntToInt32(v_1.AuxInt)
10558 sym2 := auxToSym(v_1.Aux)
10559 x := v_1.Args[1]
10560 ptr2 := v_1.Args[0]
10561 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10562 break
10563 }
10564 v.reset(OpAMD64MOVLQZX)
10565 v.AddArg(x)
10566 return true
10567 }
10568
10569
10570
10571 for {
10572 off1 := auxIntToInt32(v.AuxInt)
10573 sym := auxToSym(v.Aux)
10574 if v_0.Op != OpAMD64ADDQconst {
10575 break
10576 }
10577 off2 := auxIntToInt32(v_0.AuxInt)
10578 ptr := v_0.Args[0]
10579 mem := v_1
10580 if !(is32Bit(int64(off1) + int64(off2))) {
10581 break
10582 }
10583 v.reset(OpAMD64MOVLload)
10584 v.AuxInt = int32ToAuxInt(off1 + off2)
10585 v.Aux = symToAux(sym)
10586 v.AddArg2(ptr, mem)
10587 return true
10588 }
10589
10590
10591
10592 for {
10593 off1 := auxIntToInt32(v.AuxInt)
10594 sym1 := auxToSym(v.Aux)
10595 if v_0.Op != OpAMD64LEAQ {
10596 break
10597 }
10598 off2 := auxIntToInt32(v_0.AuxInt)
10599 sym2 := auxToSym(v_0.Aux)
10600 base := v_0.Args[0]
10601 mem := v_1
10602 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10603 break
10604 }
10605 v.reset(OpAMD64MOVLload)
10606 v.AuxInt = int32ToAuxInt(off1 + off2)
10607 v.Aux = symToAux(mergeSym(sym1, sym2))
10608 v.AddArg2(base, mem)
10609 return true
10610 }
10611
10612
10613 for {
10614 off := auxIntToInt32(v.AuxInt)
10615 sym := auxToSym(v.Aux)
10616 ptr := v_0
10617 if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
10618 break
10619 }
10620 val := v_1.Args[1]
10621 if ptr != v_1.Args[0] {
10622 break
10623 }
10624 v.reset(OpAMD64MOVLf2i)
10625 v.AddArg(val)
10626 return true
10627 }
10628
10629
10630
10631 for {
10632 off := auxIntToInt32(v.AuxInt)
10633 sym := auxToSym(v.Aux)
10634 if v_0.Op != OpSB || !(symIsRO(sym)) {
10635 break
10636 }
10637 v.reset(OpAMD64MOVQconst)
10638 v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
10639 return true
10640 }
10641 return false
10642 }
10643 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
10644 v_2 := v.Args[2]
10645 v_1 := v.Args[1]
10646 v_0 := v.Args[0]
10647
10648
10649 for {
10650 off := auxIntToInt32(v.AuxInt)
10651 sym := auxToSym(v.Aux)
10652 ptr := v_0
10653 if v_1.Op != OpAMD64MOVLQSX {
10654 break
10655 }
10656 x := v_1.Args[0]
10657 mem := v_2
10658 v.reset(OpAMD64MOVLstore)
10659 v.AuxInt = int32ToAuxInt(off)
10660 v.Aux = symToAux(sym)
10661 v.AddArg3(ptr, x, mem)
10662 return true
10663 }
10664
10665
10666 for {
10667 off := auxIntToInt32(v.AuxInt)
10668 sym := auxToSym(v.Aux)
10669 ptr := v_0
10670 if v_1.Op != OpAMD64MOVLQZX {
10671 break
10672 }
10673 x := v_1.Args[0]
10674 mem := v_2
10675 v.reset(OpAMD64MOVLstore)
10676 v.AuxInt = int32ToAuxInt(off)
10677 v.Aux = symToAux(sym)
10678 v.AddArg3(ptr, x, mem)
10679 return true
10680 }
10681
10682
10683
10684 for {
10685 off1 := auxIntToInt32(v.AuxInt)
10686 sym := auxToSym(v.Aux)
10687 if v_0.Op != OpAMD64ADDQconst {
10688 break
10689 }
10690 off2 := auxIntToInt32(v_0.AuxInt)
10691 ptr := v_0.Args[0]
10692 val := v_1
10693 mem := v_2
10694 if !(is32Bit(int64(off1) + int64(off2))) {
10695 break
10696 }
10697 v.reset(OpAMD64MOVLstore)
10698 v.AuxInt = int32ToAuxInt(off1 + off2)
10699 v.Aux = symToAux(sym)
10700 v.AddArg3(ptr, val, mem)
10701 return true
10702 }
10703
10704
10705 for {
10706 off := auxIntToInt32(v.AuxInt)
10707 sym := auxToSym(v.Aux)
10708 ptr := v_0
10709 if v_1.Op != OpAMD64MOVLconst {
10710 break
10711 }
10712 c := auxIntToInt32(v_1.AuxInt)
10713 mem := v_2
10714 v.reset(OpAMD64MOVLstoreconst)
10715 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10716 v.Aux = symToAux(sym)
10717 v.AddArg2(ptr, mem)
10718 return true
10719 }
10720
10721
10722 for {
10723 off := auxIntToInt32(v.AuxInt)
10724 sym := auxToSym(v.Aux)
10725 ptr := v_0
10726 if v_1.Op != OpAMD64MOVQconst {
10727 break
10728 }
10729 c := auxIntToInt64(v_1.AuxInt)
10730 mem := v_2
10731 v.reset(OpAMD64MOVLstoreconst)
10732 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10733 v.Aux = symToAux(sym)
10734 v.AddArg2(ptr, mem)
10735 return true
10736 }
10737
10738
10739
10740 for {
10741 off1 := auxIntToInt32(v.AuxInt)
10742 sym1 := auxToSym(v.Aux)
10743 if v_0.Op != OpAMD64LEAQ {
10744 break
10745 }
10746 off2 := auxIntToInt32(v_0.AuxInt)
10747 sym2 := auxToSym(v_0.Aux)
10748 base := v_0.Args[0]
10749 val := v_1
10750 mem := v_2
10751 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10752 break
10753 }
10754 v.reset(OpAMD64MOVLstore)
10755 v.AuxInt = int32ToAuxInt(off1 + off2)
10756 v.Aux = symToAux(mergeSym(sym1, sym2))
10757 v.AddArg3(base, val, mem)
10758 return true
10759 }
10760
10761
10762
10763 for {
10764 off := auxIntToInt32(v.AuxInt)
10765 sym := auxToSym(v.Aux)
10766 ptr := v_0
10767 y := v_1
10768 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10769 break
10770 }
10771 mem := y.Args[2]
10772 x := y.Args[0]
10773 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10774 break
10775 }
10776 v.reset(OpAMD64ADDLmodify)
10777 v.AuxInt = int32ToAuxInt(off)
10778 v.Aux = symToAux(sym)
10779 v.AddArg3(ptr, x, mem)
10780 return true
10781 }
10782
10783
10784
10785 for {
10786 off := auxIntToInt32(v.AuxInt)
10787 sym := auxToSym(v.Aux)
10788 ptr := v_0
10789 y := v_1
10790 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10791 break
10792 }
10793 mem := y.Args[2]
10794 x := y.Args[0]
10795 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10796 break
10797 }
10798 v.reset(OpAMD64ANDLmodify)
10799 v.AuxInt = int32ToAuxInt(off)
10800 v.Aux = symToAux(sym)
10801 v.AddArg3(ptr, x, mem)
10802 return true
10803 }
10804
10805
10806
10807 for {
10808 off := auxIntToInt32(v.AuxInt)
10809 sym := auxToSym(v.Aux)
10810 ptr := v_0
10811 y := v_1
10812 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10813 break
10814 }
10815 mem := y.Args[2]
10816 x := y.Args[0]
10817 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10818 break
10819 }
10820 v.reset(OpAMD64ORLmodify)
10821 v.AuxInt = int32ToAuxInt(off)
10822 v.Aux = symToAux(sym)
10823 v.AddArg3(ptr, x, mem)
10824 return true
10825 }
10826
10827
10828
10829 for {
10830 off := auxIntToInt32(v.AuxInt)
10831 sym := auxToSym(v.Aux)
10832 ptr := v_0
10833 y := v_1
10834 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10835 break
10836 }
10837 mem := y.Args[2]
10838 x := y.Args[0]
10839 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10840 break
10841 }
10842 v.reset(OpAMD64XORLmodify)
10843 v.AuxInt = int32ToAuxInt(off)
10844 v.Aux = symToAux(sym)
10845 v.AddArg3(ptr, x, mem)
10846 return true
10847 }
10848
10849
10850
10851 for {
10852 off := auxIntToInt32(v.AuxInt)
10853 sym := auxToSym(v.Aux)
10854 ptr := v_0
10855 y := v_1
10856 if y.Op != OpAMD64ADDL {
10857 break
10858 }
10859 _ = y.Args[1]
10860 y_0 := y.Args[0]
10861 y_1 := y.Args[1]
10862 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10863 l := y_0
10864 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10865 continue
10866 }
10867 mem := l.Args[1]
10868 if ptr != l.Args[0] {
10869 continue
10870 }
10871 x := y_1
10872 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10873 continue
10874 }
10875 v.reset(OpAMD64ADDLmodify)
10876 v.AuxInt = int32ToAuxInt(off)
10877 v.Aux = symToAux(sym)
10878 v.AddArg3(ptr, x, mem)
10879 return true
10880 }
10881 break
10882 }
10883
10884
10885
10886 for {
10887 off := auxIntToInt32(v.AuxInt)
10888 sym := auxToSym(v.Aux)
10889 ptr := v_0
10890 y := v_1
10891 if y.Op != OpAMD64SUBL {
10892 break
10893 }
10894 x := y.Args[1]
10895 l := y.Args[0]
10896 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10897 break
10898 }
10899 mem := l.Args[1]
10900 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10901 break
10902 }
10903 v.reset(OpAMD64SUBLmodify)
10904 v.AuxInt = int32ToAuxInt(off)
10905 v.Aux = symToAux(sym)
10906 v.AddArg3(ptr, x, mem)
10907 return true
10908 }
10909
10910
10911
10912 for {
10913 off := auxIntToInt32(v.AuxInt)
10914 sym := auxToSym(v.Aux)
10915 ptr := v_0
10916 y := v_1
10917 if y.Op != OpAMD64ANDL {
10918 break
10919 }
10920 _ = y.Args[1]
10921 y_0 := y.Args[0]
10922 y_1 := y.Args[1]
10923 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10924 l := y_0
10925 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10926 continue
10927 }
10928 mem := l.Args[1]
10929 if ptr != l.Args[0] {
10930 continue
10931 }
10932 x := y_1
10933 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10934 continue
10935 }
10936 v.reset(OpAMD64ANDLmodify)
10937 v.AuxInt = int32ToAuxInt(off)
10938 v.Aux = symToAux(sym)
10939 v.AddArg3(ptr, x, mem)
10940 return true
10941 }
10942 break
10943 }
10944
10945
10946
10947 for {
10948 off := auxIntToInt32(v.AuxInt)
10949 sym := auxToSym(v.Aux)
10950 ptr := v_0
10951 y := v_1
10952 if y.Op != OpAMD64ORL {
10953 break
10954 }
10955 _ = y.Args[1]
10956 y_0 := y.Args[0]
10957 y_1 := y.Args[1]
10958 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10959 l := y_0
10960 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10961 continue
10962 }
10963 mem := l.Args[1]
10964 if ptr != l.Args[0] {
10965 continue
10966 }
10967 x := y_1
10968 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10969 continue
10970 }
10971 v.reset(OpAMD64ORLmodify)
10972 v.AuxInt = int32ToAuxInt(off)
10973 v.Aux = symToAux(sym)
10974 v.AddArg3(ptr, x, mem)
10975 return true
10976 }
10977 break
10978 }
10979
10980
10981
10982 for {
10983 off := auxIntToInt32(v.AuxInt)
10984 sym := auxToSym(v.Aux)
10985 ptr := v_0
10986 y := v_1
10987 if y.Op != OpAMD64XORL {
10988 break
10989 }
10990 _ = y.Args[1]
10991 y_0 := y.Args[0]
10992 y_1 := y.Args[1]
10993 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10994 l := y_0
10995 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10996 continue
10997 }
10998 mem := l.Args[1]
10999 if ptr != l.Args[0] {
11000 continue
11001 }
11002 x := y_1
11003 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11004 continue
11005 }
11006 v.reset(OpAMD64XORLmodify)
11007 v.AuxInt = int32ToAuxInt(off)
11008 v.Aux = symToAux(sym)
11009 v.AddArg3(ptr, x, mem)
11010 return true
11011 }
11012 break
11013 }
11014
11015
11016
11017 for {
11018 off := auxIntToInt32(v.AuxInt)
11019 sym := auxToSym(v.Aux)
11020 ptr := v_0
11021 a := v_1
11022 if a.Op != OpAMD64ADDLconst {
11023 break
11024 }
11025 c := auxIntToInt32(a.AuxInt)
11026 l := a.Args[0]
11027 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11028 break
11029 }
11030 mem := l.Args[1]
11031 ptr2 := l.Args[0]
11032 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11033 break
11034 }
11035 v.reset(OpAMD64ADDLconstmodify)
11036 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11037 v.Aux = symToAux(sym)
11038 v.AddArg2(ptr, mem)
11039 return true
11040 }
11041
11042
11043
11044 for {
11045 off := auxIntToInt32(v.AuxInt)
11046 sym := auxToSym(v.Aux)
11047 ptr := v_0
11048 a := v_1
11049 if a.Op != OpAMD64ANDLconst {
11050 break
11051 }
11052 c := auxIntToInt32(a.AuxInt)
11053 l := a.Args[0]
11054 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11055 break
11056 }
11057 mem := l.Args[1]
11058 ptr2 := l.Args[0]
11059 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11060 break
11061 }
11062 v.reset(OpAMD64ANDLconstmodify)
11063 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11064 v.Aux = symToAux(sym)
11065 v.AddArg2(ptr, mem)
11066 return true
11067 }
11068
11069
11070
11071 for {
11072 off := auxIntToInt32(v.AuxInt)
11073 sym := auxToSym(v.Aux)
11074 ptr := v_0
11075 a := v_1
11076 if a.Op != OpAMD64ORLconst {
11077 break
11078 }
11079 c := auxIntToInt32(a.AuxInt)
11080 l := a.Args[0]
11081 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11082 break
11083 }
11084 mem := l.Args[1]
11085 ptr2 := l.Args[0]
11086 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11087 break
11088 }
11089 v.reset(OpAMD64ORLconstmodify)
11090 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11091 v.Aux = symToAux(sym)
11092 v.AddArg2(ptr, mem)
11093 return true
11094 }
11095
11096
11097
11098 for {
11099 off := auxIntToInt32(v.AuxInt)
11100 sym := auxToSym(v.Aux)
11101 ptr := v_0
11102 a := v_1
11103 if a.Op != OpAMD64XORLconst {
11104 break
11105 }
11106 c := auxIntToInt32(a.AuxInt)
11107 l := a.Args[0]
11108 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11109 break
11110 }
11111 mem := l.Args[1]
11112 ptr2 := l.Args[0]
11113 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11114 break
11115 }
11116 v.reset(OpAMD64XORLconstmodify)
11117 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11118 v.Aux = symToAux(sym)
11119 v.AddArg2(ptr, mem)
11120 return true
11121 }
11122
11123
11124 for {
11125 off := auxIntToInt32(v.AuxInt)
11126 sym := auxToSym(v.Aux)
11127 ptr := v_0
11128 if v_1.Op != OpAMD64MOVLf2i {
11129 break
11130 }
11131 val := v_1.Args[0]
11132 mem := v_2
11133 v.reset(OpAMD64MOVSSstore)
11134 v.AuxInt = int32ToAuxInt(off)
11135 v.Aux = symToAux(sym)
11136 v.AddArg3(ptr, val, mem)
11137 return true
11138 }
11139
11140
11141
11142 for {
11143 i := auxIntToInt32(v.AuxInt)
11144 s := auxToSym(v.Aux)
11145 p := v_0
11146 x := v_1
11147 if x.Op != OpAMD64BSWAPL {
11148 break
11149 }
11150 w := x.Args[0]
11151 mem := v_2
11152 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
11153 break
11154 }
11155 v.reset(OpAMD64MOVBELstore)
11156 v.AuxInt = int32ToAuxInt(i)
11157 v.Aux = symToAux(s)
11158 v.AddArg3(p, w, mem)
11159 return true
11160 }
11161 return false
11162 }
11163 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
11164 v_1 := v.Args[1]
11165 v_0 := v.Args[0]
11166
11167
11168
11169 for {
11170 sc := auxIntToValAndOff(v.AuxInt)
11171 s := auxToSym(v.Aux)
11172 if v_0.Op != OpAMD64ADDQconst {
11173 break
11174 }
11175 off := auxIntToInt32(v_0.AuxInt)
11176 ptr := v_0.Args[0]
11177 mem := v_1
11178 if !(ValAndOff(sc).canAdd32(off)) {
11179 break
11180 }
11181 v.reset(OpAMD64MOVLstoreconst)
11182 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11183 v.Aux = symToAux(s)
11184 v.AddArg2(ptr, mem)
11185 return true
11186 }
11187
11188
11189
11190 for {
11191 sc := auxIntToValAndOff(v.AuxInt)
11192 sym1 := auxToSym(v.Aux)
11193 if v_0.Op != OpAMD64LEAQ {
11194 break
11195 }
11196 off := auxIntToInt32(v_0.AuxInt)
11197 sym2 := auxToSym(v_0.Aux)
11198 ptr := v_0.Args[0]
11199 mem := v_1
11200 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11201 break
11202 }
11203 v.reset(OpAMD64MOVLstoreconst)
11204 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11205 v.Aux = symToAux(mergeSym(sym1, sym2))
11206 v.AddArg2(ptr, mem)
11207 return true
11208 }
11209 return false
11210 }
11211 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
11212 v_1 := v.Args[1]
11213 v_0 := v.Args[0]
11214
11215
11216
11217 for {
11218 off1 := auxIntToInt32(v.AuxInt)
11219 sym := auxToSym(v.Aux)
11220 if v_0.Op != OpAMD64ADDQconst {
11221 break
11222 }
11223 off2 := auxIntToInt32(v_0.AuxInt)
11224 ptr := v_0.Args[0]
11225 mem := v_1
11226 if !(is32Bit(int64(off1) + int64(off2))) {
11227 break
11228 }
11229 v.reset(OpAMD64MOVOload)
11230 v.AuxInt = int32ToAuxInt(off1 + off2)
11231 v.Aux = symToAux(sym)
11232 v.AddArg2(ptr, mem)
11233 return true
11234 }
11235
11236
11237
11238 for {
11239 off1 := auxIntToInt32(v.AuxInt)
11240 sym1 := auxToSym(v.Aux)
11241 if v_0.Op != OpAMD64LEAQ {
11242 break
11243 }
11244 off2 := auxIntToInt32(v_0.AuxInt)
11245 sym2 := auxToSym(v_0.Aux)
11246 base := v_0.Args[0]
11247 mem := v_1
11248 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11249 break
11250 }
11251 v.reset(OpAMD64MOVOload)
11252 v.AuxInt = int32ToAuxInt(off1 + off2)
11253 v.Aux = symToAux(mergeSym(sym1, sym2))
11254 v.AddArg2(base, mem)
11255 return true
11256 }
11257 return false
11258 }
11259 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
11260 v_2 := v.Args[2]
11261 v_1 := v.Args[1]
11262 v_0 := v.Args[0]
11263 b := v.Block
11264 config := b.Func.Config
11265 typ := &b.Func.Config.Types
11266
11267
11268
11269 for {
11270 off1 := auxIntToInt32(v.AuxInt)
11271 sym := auxToSym(v.Aux)
11272 if v_0.Op != OpAMD64ADDQconst {
11273 break
11274 }
11275 off2 := auxIntToInt32(v_0.AuxInt)
11276 ptr := v_0.Args[0]
11277 val := v_1
11278 mem := v_2
11279 if !(is32Bit(int64(off1) + int64(off2))) {
11280 break
11281 }
11282 v.reset(OpAMD64MOVOstore)
11283 v.AuxInt = int32ToAuxInt(off1 + off2)
11284 v.Aux = symToAux(sym)
11285 v.AddArg3(ptr, val, mem)
11286 return true
11287 }
11288
11289
11290
11291 for {
11292 off1 := auxIntToInt32(v.AuxInt)
11293 sym1 := auxToSym(v.Aux)
11294 if v_0.Op != OpAMD64LEAQ {
11295 break
11296 }
11297 off2 := auxIntToInt32(v_0.AuxInt)
11298 sym2 := auxToSym(v_0.Aux)
11299 base := v_0.Args[0]
11300 val := v_1
11301 mem := v_2
11302 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11303 break
11304 }
11305 v.reset(OpAMD64MOVOstore)
11306 v.AuxInt = int32ToAuxInt(off1 + off2)
11307 v.Aux = symToAux(mergeSym(sym1, sym2))
11308 v.AddArg3(base, val, mem)
11309 return true
11310 }
11311
11312
11313
11314 for {
11315 dstOff := auxIntToInt32(v.AuxInt)
11316 dstSym := auxToSym(v.Aux)
11317 ptr := v_0
11318 if v_1.Op != OpAMD64MOVOload {
11319 break
11320 }
11321 srcOff := auxIntToInt32(v_1.AuxInt)
11322 srcSym := auxToSym(v_1.Aux)
11323 v_1_0 := v_1.Args[0]
11324 if v_1_0.Op != OpSB {
11325 break
11326 }
11327 mem := v_2
11328 if !(symIsRO(srcSym)) {
11329 break
11330 }
11331 v.reset(OpAMD64MOVQstore)
11332 v.AuxInt = int32ToAuxInt(dstOff + 8)
11333 v.Aux = symToAux(dstSym)
11334 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11335 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
11336 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
11337 v1.AuxInt = int32ToAuxInt(dstOff)
11338 v1.Aux = symToAux(dstSym)
11339 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11340 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
11341 v1.AddArg3(ptr, v2, mem)
11342 v.AddArg3(ptr, v0, v1)
11343 return true
11344 }
11345 return false
11346 }
11347 func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
11348 v_1 := v.Args[1]
11349 v_0 := v.Args[0]
11350
11351
11352
11353 for {
11354 sc := auxIntToValAndOff(v.AuxInt)
11355 s := auxToSym(v.Aux)
11356 if v_0.Op != OpAMD64ADDQconst {
11357 break
11358 }
11359 off := auxIntToInt32(v_0.AuxInt)
11360 ptr := v_0.Args[0]
11361 mem := v_1
11362 if !(ValAndOff(sc).canAdd32(off)) {
11363 break
11364 }
11365 v.reset(OpAMD64MOVOstoreconst)
11366 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11367 v.Aux = symToAux(s)
11368 v.AddArg2(ptr, mem)
11369 return true
11370 }
11371
11372
11373
11374 for {
11375 sc := auxIntToValAndOff(v.AuxInt)
11376 sym1 := auxToSym(v.Aux)
11377 if v_0.Op != OpAMD64LEAQ {
11378 break
11379 }
11380 off := auxIntToInt32(v_0.AuxInt)
11381 sym2 := auxToSym(v_0.Aux)
11382 ptr := v_0.Args[0]
11383 mem := v_1
11384 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11385 break
11386 }
11387 v.reset(OpAMD64MOVOstoreconst)
11388 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11389 v.Aux = symToAux(mergeSym(sym1, sym2))
11390 v.AddArg2(ptr, mem)
11391 return true
11392 }
11393 return false
11394 }
11395 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
11396 v_1 := v.Args[1]
11397 v_0 := v.Args[0]
11398
11399
11400
11401 for {
11402 off1 := auxIntToInt32(v.AuxInt)
11403 sym := auxToSym(v.Aux)
11404 if v_0.Op != OpAMD64ADDQconst {
11405 break
11406 }
11407 off2 := auxIntToInt32(v_0.AuxInt)
11408 ptr := v_0.Args[0]
11409 mem := v_1
11410 if !(is32Bit(int64(off1) + int64(off2))) {
11411 break
11412 }
11413 v.reset(OpAMD64MOVQatomicload)
11414 v.AuxInt = int32ToAuxInt(off1 + off2)
11415 v.Aux = symToAux(sym)
11416 v.AddArg2(ptr, mem)
11417 return true
11418 }
11419
11420
11421
11422 for {
11423 off1 := auxIntToInt32(v.AuxInt)
11424 sym1 := auxToSym(v.Aux)
11425 if v_0.Op != OpAMD64LEAQ {
11426 break
11427 }
11428 off2 := auxIntToInt32(v_0.AuxInt)
11429 sym2 := auxToSym(v_0.Aux)
11430 ptr := v_0.Args[0]
11431 mem := v_1
11432 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11433 break
11434 }
11435 v.reset(OpAMD64MOVQatomicload)
11436 v.AuxInt = int32ToAuxInt(off1 + off2)
11437 v.Aux = symToAux(mergeSym(sym1, sym2))
11438 v.AddArg2(ptr, mem)
11439 return true
11440 }
11441 return false
11442 }
11443 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
11444 v_0 := v.Args[0]
11445 b := v.Block
11446
11447
11448
11449 for {
11450 t := v.Type
11451 if v_0.Op != OpArg {
11452 break
11453 }
11454 u := v_0.Type
11455 off := auxIntToInt32(v_0.AuxInt)
11456 sym := auxToSym(v_0.Aux)
11457 if !(t.Size() == u.Size()) {
11458 break
11459 }
11460 b = b.Func.Entry
11461 v0 := b.NewValue0(v.Pos, OpArg, t)
11462 v.copyOf(v0)
11463 v0.AuxInt = int32ToAuxInt(off)
11464 v0.Aux = symToAux(sym)
11465 return true
11466 }
11467 return false
11468 }
11469 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
11470 v_0 := v.Args[0]
11471 b := v.Block
11472
11473
11474
11475 for {
11476 t := v.Type
11477 if v_0.Op != OpArg {
11478 break
11479 }
11480 u := v_0.Type
11481 off := auxIntToInt32(v_0.AuxInt)
11482 sym := auxToSym(v_0.Aux)
11483 if !(t.Size() == u.Size()) {
11484 break
11485 }
11486 b = b.Func.Entry
11487 v0 := b.NewValue0(v.Pos, OpArg, t)
11488 v.copyOf(v0)
11489 v0.AuxInt = int32ToAuxInt(off)
11490 v0.Aux = symToAux(sym)
11491 return true
11492 }
11493 return false
11494 }
11495 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
11496 v_1 := v.Args[1]
11497 v_0 := v.Args[0]
11498 b := v.Block
11499 config := b.Func.Config
11500
11501
11502
11503 for {
11504 off := auxIntToInt32(v.AuxInt)
11505 sym := auxToSym(v.Aux)
11506 ptr := v_0
11507 if v_1.Op != OpAMD64MOVQstore {
11508 break
11509 }
11510 off2 := auxIntToInt32(v_1.AuxInt)
11511 sym2 := auxToSym(v_1.Aux)
11512 x := v_1.Args[1]
11513 ptr2 := v_1.Args[0]
11514 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11515 break
11516 }
11517 v.copyOf(x)
11518 return true
11519 }
11520
11521
11522
11523 for {
11524 off1 := auxIntToInt32(v.AuxInt)
11525 sym := auxToSym(v.Aux)
11526 if v_0.Op != OpAMD64ADDQconst {
11527 break
11528 }
11529 off2 := auxIntToInt32(v_0.AuxInt)
11530 ptr := v_0.Args[0]
11531 mem := v_1
11532 if !(is32Bit(int64(off1) + int64(off2))) {
11533 break
11534 }
11535 v.reset(OpAMD64MOVQload)
11536 v.AuxInt = int32ToAuxInt(off1 + off2)
11537 v.Aux = symToAux(sym)
11538 v.AddArg2(ptr, mem)
11539 return true
11540 }
11541
11542
11543
11544 for {
11545 off1 := auxIntToInt32(v.AuxInt)
11546 sym1 := auxToSym(v.Aux)
11547 if v_0.Op != OpAMD64LEAQ {
11548 break
11549 }
11550 off2 := auxIntToInt32(v_0.AuxInt)
11551 sym2 := auxToSym(v_0.Aux)
11552 base := v_0.Args[0]
11553 mem := v_1
11554 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11555 break
11556 }
11557 v.reset(OpAMD64MOVQload)
11558 v.AuxInt = int32ToAuxInt(off1 + off2)
11559 v.Aux = symToAux(mergeSym(sym1, sym2))
11560 v.AddArg2(base, mem)
11561 return true
11562 }
11563
11564
11565 for {
11566 off := auxIntToInt32(v.AuxInt)
11567 sym := auxToSym(v.Aux)
11568 ptr := v_0
11569 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
11570 break
11571 }
11572 val := v_1.Args[1]
11573 if ptr != v_1.Args[0] {
11574 break
11575 }
11576 v.reset(OpAMD64MOVQf2i)
11577 v.AddArg(val)
11578 return true
11579 }
11580
11581
11582
11583 for {
11584 off := auxIntToInt32(v.AuxInt)
11585 sym := auxToSym(v.Aux)
11586 if v_0.Op != OpSB || !(symIsRO(sym)) {
11587 break
11588 }
11589 v.reset(OpAMD64MOVQconst)
11590 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
11591 return true
11592 }
11593 return false
11594 }
11595 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
11596 v_2 := v.Args[2]
11597 v_1 := v.Args[1]
11598 v_0 := v.Args[0]
11599
11600
11601
11602 for {
11603 off1 := auxIntToInt32(v.AuxInt)
11604 sym := auxToSym(v.Aux)
11605 if v_0.Op != OpAMD64ADDQconst {
11606 break
11607 }
11608 off2 := auxIntToInt32(v_0.AuxInt)
11609 ptr := v_0.Args[0]
11610 val := v_1
11611 mem := v_2
11612 if !(is32Bit(int64(off1) + int64(off2))) {
11613 break
11614 }
11615 v.reset(OpAMD64MOVQstore)
11616 v.AuxInt = int32ToAuxInt(off1 + off2)
11617 v.Aux = symToAux(sym)
11618 v.AddArg3(ptr, val, mem)
11619 return true
11620 }
11621
11622
11623
11624 for {
11625 off := auxIntToInt32(v.AuxInt)
11626 sym := auxToSym(v.Aux)
11627 ptr := v_0
11628 if v_1.Op != OpAMD64MOVQconst {
11629 break
11630 }
11631 c := auxIntToInt64(v_1.AuxInt)
11632 mem := v_2
11633 if !(validVal(c)) {
11634 break
11635 }
11636 v.reset(OpAMD64MOVQstoreconst)
11637 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11638 v.Aux = symToAux(sym)
11639 v.AddArg2(ptr, mem)
11640 return true
11641 }
11642
11643
11644
11645 for {
11646 off1 := auxIntToInt32(v.AuxInt)
11647 sym1 := auxToSym(v.Aux)
11648 if v_0.Op != OpAMD64LEAQ {
11649 break
11650 }
11651 off2 := auxIntToInt32(v_0.AuxInt)
11652 sym2 := auxToSym(v_0.Aux)
11653 base := v_0.Args[0]
11654 val := v_1
11655 mem := v_2
11656 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11657 break
11658 }
11659 v.reset(OpAMD64MOVQstore)
11660 v.AuxInt = int32ToAuxInt(off1 + off2)
11661 v.Aux = symToAux(mergeSym(sym1, sym2))
11662 v.AddArg3(base, val, mem)
11663 return true
11664 }
11665
11666
11667
11668 for {
11669 off := auxIntToInt32(v.AuxInt)
11670 sym := auxToSym(v.Aux)
11671 ptr := v_0
11672 y := v_1
11673 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11674 break
11675 }
11676 mem := y.Args[2]
11677 x := y.Args[0]
11678 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11679 break
11680 }
11681 v.reset(OpAMD64ADDQmodify)
11682 v.AuxInt = int32ToAuxInt(off)
11683 v.Aux = symToAux(sym)
11684 v.AddArg3(ptr, x, mem)
11685 return true
11686 }
11687
11688
11689
11690 for {
11691 off := auxIntToInt32(v.AuxInt)
11692 sym := auxToSym(v.Aux)
11693 ptr := v_0
11694 y := v_1
11695 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11696 break
11697 }
11698 mem := y.Args[2]
11699 x := y.Args[0]
11700 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11701 break
11702 }
11703 v.reset(OpAMD64ANDQmodify)
11704 v.AuxInt = int32ToAuxInt(off)
11705 v.Aux = symToAux(sym)
11706 v.AddArg3(ptr, x, mem)
11707 return true
11708 }
11709
11710
11711
11712 for {
11713 off := auxIntToInt32(v.AuxInt)
11714 sym := auxToSym(v.Aux)
11715 ptr := v_0
11716 y := v_1
11717 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11718 break
11719 }
11720 mem := y.Args[2]
11721 x := y.Args[0]
11722 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11723 break
11724 }
11725 v.reset(OpAMD64ORQmodify)
11726 v.AuxInt = int32ToAuxInt(off)
11727 v.Aux = symToAux(sym)
11728 v.AddArg3(ptr, x, mem)
11729 return true
11730 }
11731
11732
11733
11734 for {
11735 off := auxIntToInt32(v.AuxInt)
11736 sym := auxToSym(v.Aux)
11737 ptr := v_0
11738 y := v_1
11739 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11740 break
11741 }
11742 mem := y.Args[2]
11743 x := y.Args[0]
11744 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11745 break
11746 }
11747 v.reset(OpAMD64XORQmodify)
11748 v.AuxInt = int32ToAuxInt(off)
11749 v.Aux = symToAux(sym)
11750 v.AddArg3(ptr, x, mem)
11751 return true
11752 }
11753
11754
11755
11756 for {
11757 off := auxIntToInt32(v.AuxInt)
11758 sym := auxToSym(v.Aux)
11759 ptr := v_0
11760 y := v_1
11761 if y.Op != OpAMD64ADDQ {
11762 break
11763 }
11764 _ = y.Args[1]
11765 y_0 := y.Args[0]
11766 y_1 := y.Args[1]
11767 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11768 l := y_0
11769 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11770 continue
11771 }
11772 mem := l.Args[1]
11773 if ptr != l.Args[0] {
11774 continue
11775 }
11776 x := y_1
11777 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11778 continue
11779 }
11780 v.reset(OpAMD64ADDQmodify)
11781 v.AuxInt = int32ToAuxInt(off)
11782 v.Aux = symToAux(sym)
11783 v.AddArg3(ptr, x, mem)
11784 return true
11785 }
11786 break
11787 }
11788
11789
11790
11791 for {
11792 off := auxIntToInt32(v.AuxInt)
11793 sym := auxToSym(v.Aux)
11794 ptr := v_0
11795 y := v_1
11796 if y.Op != OpAMD64SUBQ {
11797 break
11798 }
11799 x := y.Args[1]
11800 l := y.Args[0]
11801 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11802 break
11803 }
11804 mem := l.Args[1]
11805 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11806 break
11807 }
11808 v.reset(OpAMD64SUBQmodify)
11809 v.AuxInt = int32ToAuxInt(off)
11810 v.Aux = symToAux(sym)
11811 v.AddArg3(ptr, x, mem)
11812 return true
11813 }
11814
11815
11816
11817 for {
11818 off := auxIntToInt32(v.AuxInt)
11819 sym := auxToSym(v.Aux)
11820 ptr := v_0
11821 y := v_1
11822 if y.Op != OpAMD64ANDQ {
11823 break
11824 }
11825 _ = y.Args[1]
11826 y_0 := y.Args[0]
11827 y_1 := y.Args[1]
11828 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11829 l := y_0
11830 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11831 continue
11832 }
11833 mem := l.Args[1]
11834 if ptr != l.Args[0] {
11835 continue
11836 }
11837 x := y_1
11838 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11839 continue
11840 }
11841 v.reset(OpAMD64ANDQmodify)
11842 v.AuxInt = int32ToAuxInt(off)
11843 v.Aux = symToAux(sym)
11844 v.AddArg3(ptr, x, mem)
11845 return true
11846 }
11847 break
11848 }
11849
11850
11851
11852 for {
11853 off := auxIntToInt32(v.AuxInt)
11854 sym := auxToSym(v.Aux)
11855 ptr := v_0
11856 y := v_1
11857 if y.Op != OpAMD64ORQ {
11858 break
11859 }
11860 _ = y.Args[1]
11861 y_0 := y.Args[0]
11862 y_1 := y.Args[1]
11863 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11864 l := y_0
11865 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11866 continue
11867 }
11868 mem := l.Args[1]
11869 if ptr != l.Args[0] {
11870 continue
11871 }
11872 x := y_1
11873 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11874 continue
11875 }
11876 v.reset(OpAMD64ORQmodify)
11877 v.AuxInt = int32ToAuxInt(off)
11878 v.Aux = symToAux(sym)
11879 v.AddArg3(ptr, x, mem)
11880 return true
11881 }
11882 break
11883 }
11884
11885
11886
11887 for {
11888 off := auxIntToInt32(v.AuxInt)
11889 sym := auxToSym(v.Aux)
11890 ptr := v_0
11891 y := v_1
11892 if y.Op != OpAMD64XORQ {
11893 break
11894 }
11895 _ = y.Args[1]
11896 y_0 := y.Args[0]
11897 y_1 := y.Args[1]
11898 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11899 l := y_0
11900 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11901 continue
11902 }
11903 mem := l.Args[1]
11904 if ptr != l.Args[0] {
11905 continue
11906 }
11907 x := y_1
11908 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11909 continue
11910 }
11911 v.reset(OpAMD64XORQmodify)
11912 v.AuxInt = int32ToAuxInt(off)
11913 v.Aux = symToAux(sym)
11914 v.AddArg3(ptr, x, mem)
11915 return true
11916 }
11917 break
11918 }
11919
11920
11921
11922 for {
11923 off := auxIntToInt32(v.AuxInt)
11924 sym := auxToSym(v.Aux)
11925 ptr := v_0
11926 x := v_1
11927 if x.Op != OpAMD64BTSQconst {
11928 break
11929 }
11930 c := auxIntToInt8(x.AuxInt)
11931 l := x.Args[0]
11932 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11933 break
11934 }
11935 mem := l.Args[1]
11936 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11937 break
11938 }
11939 v.reset(OpAMD64BTSQconstmodify)
11940 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11941 v.Aux = symToAux(sym)
11942 v.AddArg2(ptr, mem)
11943 return true
11944 }
11945
11946
11947
11948 for {
11949 off := auxIntToInt32(v.AuxInt)
11950 sym := auxToSym(v.Aux)
11951 ptr := v_0
11952 x := v_1
11953 if x.Op != OpAMD64BTRQconst {
11954 break
11955 }
11956 c := auxIntToInt8(x.AuxInt)
11957 l := x.Args[0]
11958 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11959 break
11960 }
11961 mem := l.Args[1]
11962 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11963 break
11964 }
11965 v.reset(OpAMD64BTRQconstmodify)
11966 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11967 v.Aux = symToAux(sym)
11968 v.AddArg2(ptr, mem)
11969 return true
11970 }
11971
11972
11973
11974 for {
11975 off := auxIntToInt32(v.AuxInt)
11976 sym := auxToSym(v.Aux)
11977 ptr := v_0
11978 x := v_1
11979 if x.Op != OpAMD64BTCQconst {
11980 break
11981 }
11982 c := auxIntToInt8(x.AuxInt)
11983 l := x.Args[0]
11984 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11985 break
11986 }
11987 mem := l.Args[1]
11988 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11989 break
11990 }
11991 v.reset(OpAMD64BTCQconstmodify)
11992 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11993 v.Aux = symToAux(sym)
11994 v.AddArg2(ptr, mem)
11995 return true
11996 }
11997
11998
11999
12000 for {
12001 off := auxIntToInt32(v.AuxInt)
12002 sym := auxToSym(v.Aux)
12003 ptr := v_0
12004 a := v_1
12005 if a.Op != OpAMD64ADDQconst {
12006 break
12007 }
12008 c := auxIntToInt32(a.AuxInt)
12009 l := a.Args[0]
12010 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12011 break
12012 }
12013 mem := l.Args[1]
12014 ptr2 := l.Args[0]
12015 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12016 break
12017 }
12018 v.reset(OpAMD64ADDQconstmodify)
12019 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12020 v.Aux = symToAux(sym)
12021 v.AddArg2(ptr, mem)
12022 return true
12023 }
12024
12025
12026
12027 for {
12028 off := auxIntToInt32(v.AuxInt)
12029 sym := auxToSym(v.Aux)
12030 ptr := v_0
12031 a := v_1
12032 if a.Op != OpAMD64ANDQconst {
12033 break
12034 }
12035 c := auxIntToInt32(a.AuxInt)
12036 l := a.Args[0]
12037 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12038 break
12039 }
12040 mem := l.Args[1]
12041 ptr2 := l.Args[0]
12042 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12043 break
12044 }
12045 v.reset(OpAMD64ANDQconstmodify)
12046 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12047 v.Aux = symToAux(sym)
12048 v.AddArg2(ptr, mem)
12049 return true
12050 }
12051
12052
12053
12054 for {
12055 off := auxIntToInt32(v.AuxInt)
12056 sym := auxToSym(v.Aux)
12057 ptr := v_0
12058 a := v_1
12059 if a.Op != OpAMD64ORQconst {
12060 break
12061 }
12062 c := auxIntToInt32(a.AuxInt)
12063 l := a.Args[0]
12064 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12065 break
12066 }
12067 mem := l.Args[1]
12068 ptr2 := l.Args[0]
12069 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12070 break
12071 }
12072 v.reset(OpAMD64ORQconstmodify)
12073 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12074 v.Aux = symToAux(sym)
12075 v.AddArg2(ptr, mem)
12076 return true
12077 }
12078
12079
12080
12081 for {
12082 off := auxIntToInt32(v.AuxInt)
12083 sym := auxToSym(v.Aux)
12084 ptr := v_0
12085 a := v_1
12086 if a.Op != OpAMD64XORQconst {
12087 break
12088 }
12089 c := auxIntToInt32(a.AuxInt)
12090 l := a.Args[0]
12091 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12092 break
12093 }
12094 mem := l.Args[1]
12095 ptr2 := l.Args[0]
12096 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12097 break
12098 }
12099 v.reset(OpAMD64XORQconstmodify)
12100 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12101 v.Aux = symToAux(sym)
12102 v.AddArg2(ptr, mem)
12103 return true
12104 }
12105
12106
12107 for {
12108 off := auxIntToInt32(v.AuxInt)
12109 sym := auxToSym(v.Aux)
12110 ptr := v_0
12111 if v_1.Op != OpAMD64MOVQf2i {
12112 break
12113 }
12114 val := v_1.Args[0]
12115 mem := v_2
12116 v.reset(OpAMD64MOVSDstore)
12117 v.AuxInt = int32ToAuxInt(off)
12118 v.Aux = symToAux(sym)
12119 v.AddArg3(ptr, val, mem)
12120 return true
12121 }
12122
12123
12124
12125 for {
12126 i := auxIntToInt32(v.AuxInt)
12127 s := auxToSym(v.Aux)
12128 p := v_0
12129 x := v_1
12130 if x.Op != OpAMD64BSWAPQ {
12131 break
12132 }
12133 w := x.Args[0]
12134 mem := v_2
12135 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12136 break
12137 }
12138 v.reset(OpAMD64MOVBEQstore)
12139 v.AuxInt = int32ToAuxInt(i)
12140 v.Aux = symToAux(s)
12141 v.AddArg3(p, w, mem)
12142 return true
12143 }
12144 return false
12145 }
12146 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
12147 v_1 := v.Args[1]
12148 v_0 := v.Args[0]
12149 b := v.Block
12150 config := b.Func.Config
12151
12152
12153
12154 for {
12155 sc := auxIntToValAndOff(v.AuxInt)
12156 s := auxToSym(v.Aux)
12157 if v_0.Op != OpAMD64ADDQconst {
12158 break
12159 }
12160 off := auxIntToInt32(v_0.AuxInt)
12161 ptr := v_0.Args[0]
12162 mem := v_1
12163 if !(ValAndOff(sc).canAdd32(off)) {
12164 break
12165 }
12166 v.reset(OpAMD64MOVQstoreconst)
12167 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12168 v.Aux = symToAux(s)
12169 v.AddArg2(ptr, mem)
12170 return true
12171 }
12172
12173
12174
12175 for {
12176 sc := auxIntToValAndOff(v.AuxInt)
12177 sym1 := auxToSym(v.Aux)
12178 if v_0.Op != OpAMD64LEAQ {
12179 break
12180 }
12181 off := auxIntToInt32(v_0.AuxInt)
12182 sym2 := auxToSym(v_0.Aux)
12183 ptr := v_0.Args[0]
12184 mem := v_1
12185 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
12186 break
12187 }
12188 v.reset(OpAMD64MOVQstoreconst)
12189 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12190 v.Aux = symToAux(mergeSym(sym1, sym2))
12191 v.AddArg2(ptr, mem)
12192 return true
12193 }
12194
12195
12196
12197 for {
12198 c := auxIntToValAndOff(v.AuxInt)
12199 s := auxToSym(v.Aux)
12200 p1 := v_0
12201 x := v_1
12202 if x.Op != OpAMD64MOVQstoreconst {
12203 break
12204 }
12205 a := auxIntToValAndOff(x.AuxInt)
12206 if auxToSym(x.Aux) != s {
12207 break
12208 }
12209 mem := x.Args[1]
12210 p0 := x.Args[0]
12211 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12212 break
12213 }
12214 v.reset(OpAMD64MOVOstoreconst)
12215 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12216 v.Aux = symToAux(s)
12217 v.AddArg2(p0, mem)
12218 return true
12219 }
12220
12221
12222
12223 for {
12224 a := auxIntToValAndOff(v.AuxInt)
12225 s := auxToSym(v.Aux)
12226 p0 := v_0
12227 x := v_1
12228 if x.Op != OpAMD64MOVQstoreconst {
12229 break
12230 }
12231 c := auxIntToValAndOff(x.AuxInt)
12232 if auxToSym(x.Aux) != s {
12233 break
12234 }
12235 mem := x.Args[1]
12236 p1 := x.Args[0]
12237 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12238 break
12239 }
12240 v.reset(OpAMD64MOVOstoreconst)
12241 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12242 v.Aux = symToAux(s)
12243 v.AddArg2(p0, mem)
12244 return true
12245 }
12246 return false
12247 }
12248 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
12249 v_1 := v.Args[1]
12250 v_0 := v.Args[0]
12251
12252
12253
12254 for {
12255 off1 := auxIntToInt32(v.AuxInt)
12256 sym := auxToSym(v.Aux)
12257 if v_0.Op != OpAMD64ADDQconst {
12258 break
12259 }
12260 off2 := auxIntToInt32(v_0.AuxInt)
12261 ptr := v_0.Args[0]
12262 mem := v_1
12263 if !(is32Bit(int64(off1) + int64(off2))) {
12264 break
12265 }
12266 v.reset(OpAMD64MOVSDload)
12267 v.AuxInt = int32ToAuxInt(off1 + off2)
12268 v.Aux = symToAux(sym)
12269 v.AddArg2(ptr, mem)
12270 return true
12271 }
12272
12273
12274
12275 for {
12276 off1 := auxIntToInt32(v.AuxInt)
12277 sym1 := auxToSym(v.Aux)
12278 if v_0.Op != OpAMD64LEAQ {
12279 break
12280 }
12281 off2 := auxIntToInt32(v_0.AuxInt)
12282 sym2 := auxToSym(v_0.Aux)
12283 base := v_0.Args[0]
12284 mem := v_1
12285 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12286 break
12287 }
12288 v.reset(OpAMD64MOVSDload)
12289 v.AuxInt = int32ToAuxInt(off1 + off2)
12290 v.Aux = symToAux(mergeSym(sym1, sym2))
12291 v.AddArg2(base, mem)
12292 return true
12293 }
12294
12295
12296 for {
12297 off := auxIntToInt32(v.AuxInt)
12298 sym := auxToSym(v.Aux)
12299 ptr := v_0
12300 if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12301 break
12302 }
12303 val := v_1.Args[1]
12304 if ptr != v_1.Args[0] {
12305 break
12306 }
12307 v.reset(OpAMD64MOVQi2f)
12308 v.AddArg(val)
12309 return true
12310 }
12311 return false
12312 }
12313 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
12314 v_2 := v.Args[2]
12315 v_1 := v.Args[1]
12316 v_0 := v.Args[0]
12317 b := v.Block
12318 typ := &b.Func.Config.Types
12319
12320
12321
12322 for {
12323 off1 := auxIntToInt32(v.AuxInt)
12324 sym := auxToSym(v.Aux)
12325 if v_0.Op != OpAMD64ADDQconst {
12326 break
12327 }
12328 off2 := auxIntToInt32(v_0.AuxInt)
12329 ptr := v_0.Args[0]
12330 val := v_1
12331 mem := v_2
12332 if !(is32Bit(int64(off1) + int64(off2))) {
12333 break
12334 }
12335 v.reset(OpAMD64MOVSDstore)
12336 v.AuxInt = int32ToAuxInt(off1 + off2)
12337 v.Aux = symToAux(sym)
12338 v.AddArg3(ptr, val, mem)
12339 return true
12340 }
12341
12342
12343
12344 for {
12345 off1 := auxIntToInt32(v.AuxInt)
12346 sym1 := auxToSym(v.Aux)
12347 if v_0.Op != OpAMD64LEAQ {
12348 break
12349 }
12350 off2 := auxIntToInt32(v_0.AuxInt)
12351 sym2 := auxToSym(v_0.Aux)
12352 base := v_0.Args[0]
12353 val := v_1
12354 mem := v_2
12355 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12356 break
12357 }
12358 v.reset(OpAMD64MOVSDstore)
12359 v.AuxInt = int32ToAuxInt(off1 + off2)
12360 v.Aux = symToAux(mergeSym(sym1, sym2))
12361 v.AddArg3(base, val, mem)
12362 return true
12363 }
12364
12365
12366 for {
12367 off := auxIntToInt32(v.AuxInt)
12368 sym := auxToSym(v.Aux)
12369 ptr := v_0
12370 if v_1.Op != OpAMD64MOVQi2f {
12371 break
12372 }
12373 val := v_1.Args[0]
12374 mem := v_2
12375 v.reset(OpAMD64MOVQstore)
12376 v.AuxInt = int32ToAuxInt(off)
12377 v.Aux = symToAux(sym)
12378 v.AddArg3(ptr, val, mem)
12379 return true
12380 }
12381
12382
12383
12384 for {
12385 off := auxIntToInt32(v.AuxInt)
12386 sym := auxToSym(v.Aux)
12387 ptr := v_0
12388 if v_1.Op != OpAMD64MOVSDconst {
12389 break
12390 }
12391 f := auxIntToFloat64(v_1.AuxInt)
12392 mem := v_2
12393 if !(f == f) {
12394 break
12395 }
12396 v.reset(OpAMD64MOVQstore)
12397 v.AuxInt = int32ToAuxInt(off)
12398 v.Aux = symToAux(sym)
12399 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
12400 v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(f)))
12401 v.AddArg3(ptr, v0, mem)
12402 return true
12403 }
12404 return false
12405 }
12406 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
12407 v_1 := v.Args[1]
12408 v_0 := v.Args[0]
12409
12410
12411
12412 for {
12413 off1 := auxIntToInt32(v.AuxInt)
12414 sym := auxToSym(v.Aux)
12415 if v_0.Op != OpAMD64ADDQconst {
12416 break
12417 }
12418 off2 := auxIntToInt32(v_0.AuxInt)
12419 ptr := v_0.Args[0]
12420 mem := v_1
12421 if !(is32Bit(int64(off1) + int64(off2))) {
12422 break
12423 }
12424 v.reset(OpAMD64MOVSSload)
12425 v.AuxInt = int32ToAuxInt(off1 + off2)
12426 v.Aux = symToAux(sym)
12427 v.AddArg2(ptr, mem)
12428 return true
12429 }
12430
12431
12432
12433 for {
12434 off1 := auxIntToInt32(v.AuxInt)
12435 sym1 := auxToSym(v.Aux)
12436 if v_0.Op != OpAMD64LEAQ {
12437 break
12438 }
12439 off2 := auxIntToInt32(v_0.AuxInt)
12440 sym2 := auxToSym(v_0.Aux)
12441 base := v_0.Args[0]
12442 mem := v_1
12443 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12444 break
12445 }
12446 v.reset(OpAMD64MOVSSload)
12447 v.AuxInt = int32ToAuxInt(off1 + off2)
12448 v.Aux = symToAux(mergeSym(sym1, sym2))
12449 v.AddArg2(base, mem)
12450 return true
12451 }
12452
12453
12454 for {
12455 off := auxIntToInt32(v.AuxInt)
12456 sym := auxToSym(v.Aux)
12457 ptr := v_0
12458 if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12459 break
12460 }
12461 val := v_1.Args[1]
12462 if ptr != v_1.Args[0] {
12463 break
12464 }
12465 v.reset(OpAMD64MOVLi2f)
12466 v.AddArg(val)
12467 return true
12468 }
12469 return false
12470 }
12471 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
12472 v_2 := v.Args[2]
12473 v_1 := v.Args[1]
12474 v_0 := v.Args[0]
12475 b := v.Block
12476 typ := &b.Func.Config.Types
12477
12478
12479
12480 for {
12481 off1 := auxIntToInt32(v.AuxInt)
12482 sym := auxToSym(v.Aux)
12483 if v_0.Op != OpAMD64ADDQconst {
12484 break
12485 }
12486 off2 := auxIntToInt32(v_0.AuxInt)
12487 ptr := v_0.Args[0]
12488 val := v_1
12489 mem := v_2
12490 if !(is32Bit(int64(off1) + int64(off2))) {
12491 break
12492 }
12493 v.reset(OpAMD64MOVSSstore)
12494 v.AuxInt = int32ToAuxInt(off1 + off2)
12495 v.Aux = symToAux(sym)
12496 v.AddArg3(ptr, val, mem)
12497 return true
12498 }
12499
12500
12501
12502 for {
12503 off1 := auxIntToInt32(v.AuxInt)
12504 sym1 := auxToSym(v.Aux)
12505 if v_0.Op != OpAMD64LEAQ {
12506 break
12507 }
12508 off2 := auxIntToInt32(v_0.AuxInt)
12509 sym2 := auxToSym(v_0.Aux)
12510 base := v_0.Args[0]
12511 val := v_1
12512 mem := v_2
12513 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12514 break
12515 }
12516 v.reset(OpAMD64MOVSSstore)
12517 v.AuxInt = int32ToAuxInt(off1 + off2)
12518 v.Aux = symToAux(mergeSym(sym1, sym2))
12519 v.AddArg3(base, val, mem)
12520 return true
12521 }
12522
12523
12524 for {
12525 off := auxIntToInt32(v.AuxInt)
12526 sym := auxToSym(v.Aux)
12527 ptr := v_0
12528 if v_1.Op != OpAMD64MOVLi2f {
12529 break
12530 }
12531 val := v_1.Args[0]
12532 mem := v_2
12533 v.reset(OpAMD64MOVLstore)
12534 v.AuxInt = int32ToAuxInt(off)
12535 v.Aux = symToAux(sym)
12536 v.AddArg3(ptr, val, mem)
12537 return true
12538 }
12539
12540
12541
12542 for {
12543 off := auxIntToInt32(v.AuxInt)
12544 sym := auxToSym(v.Aux)
12545 ptr := v_0
12546 if v_1.Op != OpAMD64MOVSSconst {
12547 break
12548 }
12549 f := auxIntToFloat32(v_1.AuxInt)
12550 mem := v_2
12551 if !(f == f) {
12552 break
12553 }
12554 v.reset(OpAMD64MOVLstore)
12555 v.AuxInt = int32ToAuxInt(off)
12556 v.Aux = symToAux(sym)
12557 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt32)
12558 v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(f)))
12559 v.AddArg3(ptr, v0, mem)
12560 return true
12561 }
12562 return false
12563 }
12564 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
12565 v_0 := v.Args[0]
12566 b := v.Block
12567
12568
12569
12570 for {
12571 x := v_0
12572 if x.Op != OpAMD64MOVWload {
12573 break
12574 }
12575 off := auxIntToInt32(x.AuxInt)
12576 sym := auxToSym(x.Aux)
12577 mem := x.Args[1]
12578 ptr := x.Args[0]
12579 if !(x.Uses == 1 && clobber(x)) {
12580 break
12581 }
12582 b = x.Block
12583 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12584 v.copyOf(v0)
12585 v0.AuxInt = int32ToAuxInt(off)
12586 v0.Aux = symToAux(sym)
12587 v0.AddArg2(ptr, mem)
12588 return true
12589 }
12590
12591
12592
12593 for {
12594 x := v_0
12595 if x.Op != OpAMD64MOVLload {
12596 break
12597 }
12598 off := auxIntToInt32(x.AuxInt)
12599 sym := auxToSym(x.Aux)
12600 mem := x.Args[1]
12601 ptr := x.Args[0]
12602 if !(x.Uses == 1 && clobber(x)) {
12603 break
12604 }
12605 b = x.Block
12606 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12607 v.copyOf(v0)
12608 v0.AuxInt = int32ToAuxInt(off)
12609 v0.Aux = symToAux(sym)
12610 v0.AddArg2(ptr, mem)
12611 return true
12612 }
12613
12614
12615
12616 for {
12617 x := v_0
12618 if x.Op != OpAMD64MOVQload {
12619 break
12620 }
12621 off := auxIntToInt32(x.AuxInt)
12622 sym := auxToSym(x.Aux)
12623 mem := x.Args[1]
12624 ptr := x.Args[0]
12625 if !(x.Uses == 1 && clobber(x)) {
12626 break
12627 }
12628 b = x.Block
12629 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12630 v.copyOf(v0)
12631 v0.AuxInt = int32ToAuxInt(off)
12632 v0.Aux = symToAux(sym)
12633 v0.AddArg2(ptr, mem)
12634 return true
12635 }
12636
12637
12638
12639 for {
12640 if v_0.Op != OpAMD64ANDLconst {
12641 break
12642 }
12643 c := auxIntToInt32(v_0.AuxInt)
12644 x := v_0.Args[0]
12645 if !(c&0x8000 == 0) {
12646 break
12647 }
12648 v.reset(OpAMD64ANDLconst)
12649 v.AuxInt = int32ToAuxInt(c & 0x7fff)
12650 v.AddArg(x)
12651 return true
12652 }
12653
12654
12655 for {
12656 if v_0.Op != OpAMD64MOVWQSX {
12657 break
12658 }
12659 x := v_0.Args[0]
12660 v.reset(OpAMD64MOVWQSX)
12661 v.AddArg(x)
12662 return true
12663 }
12664
12665
12666 for {
12667 if v_0.Op != OpAMD64MOVBQSX {
12668 break
12669 }
12670 x := v_0.Args[0]
12671 v.reset(OpAMD64MOVBQSX)
12672 v.AddArg(x)
12673 return true
12674 }
12675 return false
12676 }
12677 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
12678 v_1 := v.Args[1]
12679 v_0 := v.Args[0]
12680
12681
12682
12683 for {
12684 off := auxIntToInt32(v.AuxInt)
12685 sym := auxToSym(v.Aux)
12686 ptr := v_0
12687 if v_1.Op != OpAMD64MOVWstore {
12688 break
12689 }
12690 off2 := auxIntToInt32(v_1.AuxInt)
12691 sym2 := auxToSym(v_1.Aux)
12692 x := v_1.Args[1]
12693 ptr2 := v_1.Args[0]
12694 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
12695 break
12696 }
12697 v.reset(OpAMD64MOVWQSX)
12698 v.AddArg(x)
12699 return true
12700 }
12701
12702
12703
12704 for {
12705 off1 := auxIntToInt32(v.AuxInt)
12706 sym1 := auxToSym(v.Aux)
12707 if v_0.Op != OpAMD64LEAQ {
12708 break
12709 }
12710 off2 := auxIntToInt32(v_0.AuxInt)
12711 sym2 := auxToSym(v_0.Aux)
12712 base := v_0.Args[0]
12713 mem := v_1
12714 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12715 break
12716 }
12717 v.reset(OpAMD64MOVWQSXload)
12718 v.AuxInt = int32ToAuxInt(off1 + off2)
12719 v.Aux = symToAux(mergeSym(sym1, sym2))
12720 v.AddArg2(base, mem)
12721 return true
12722 }
12723 return false
12724 }
12725 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
12726 v_0 := v.Args[0]
12727 b := v.Block
12728
12729
12730
12731 for {
12732 x := v_0
12733 if x.Op != OpAMD64MOVWload {
12734 break
12735 }
12736 off := auxIntToInt32(x.AuxInt)
12737 sym := auxToSym(x.Aux)
12738 mem := x.Args[1]
12739 ptr := x.Args[0]
12740 if !(x.Uses == 1 && clobber(x)) {
12741 break
12742 }
12743 b = x.Block
12744 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
12745 v.copyOf(v0)
12746 v0.AuxInt = int32ToAuxInt(off)
12747 v0.Aux = symToAux(sym)
12748 v0.AddArg2(ptr, mem)
12749 return true
12750 }
12751
12752
12753
12754 for {
12755 x := v_0
12756 if x.Op != OpAMD64MOVLload {
12757 break
12758 }
12759 off := auxIntToInt32(x.AuxInt)
12760 sym := auxToSym(x.Aux)
12761 mem := x.Args[1]
12762 ptr := x.Args[0]
12763 if !(x.Uses == 1 && clobber(x)) {
12764 break
12765 }
12766 b = x.Block
12767 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
12768 v.copyOf(v0)
12769 v0.AuxInt = int32ToAuxInt(off)
12770 v0.Aux = symToAux(sym)
12771 v0.AddArg2(ptr, mem)
12772 return true
12773 }
12774
12775
12776
12777 for {
12778 x := v_0
12779 if x.Op != OpAMD64MOVQload {
12780 break
12781 }
12782 off := auxIntToInt32(x.AuxInt)
12783 sym := auxToSym(x.Aux)
12784 mem := x.Args[1]
12785 ptr := x.Args[0]
12786 if !(x.Uses == 1 && clobber(x)) {
12787 break
12788 }
12789 b = x.Block
12790 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
12791 v.copyOf(v0)
12792 v0.AuxInt = int32ToAuxInt(off)
12793 v0.Aux = symToAux(sym)
12794 v0.AddArg2(ptr, mem)
12795 return true
12796 }
12797
12798
12799 for {
12800 if v_0.Op != OpAMD64ANDLconst {
12801 break
12802 }
12803 c := auxIntToInt32(v_0.AuxInt)
12804 x := v_0.Args[0]
12805 v.reset(OpAMD64ANDLconst)
12806 v.AuxInt = int32ToAuxInt(c & 0xffff)
12807 v.AddArg(x)
12808 return true
12809 }
12810
12811
12812 for {
12813 if v_0.Op != OpAMD64MOVWQZX {
12814 break
12815 }
12816 x := v_0.Args[0]
12817 v.reset(OpAMD64MOVWQZX)
12818 v.AddArg(x)
12819 return true
12820 }
12821
12822
12823 for {
12824 if v_0.Op != OpAMD64MOVBQZX {
12825 break
12826 }
12827 x := v_0.Args[0]
12828 v.reset(OpAMD64MOVBQZX)
12829 v.AddArg(x)
12830 return true
12831 }
12832 return false
12833 }
12834 func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
12835 v_1 := v.Args[1]
12836 v_0 := v.Args[0]
12837 b := v.Block
12838 config := b.Func.Config
12839
12840
12841
12842 for {
12843 off := auxIntToInt32(v.AuxInt)
12844 sym := auxToSym(v.Aux)
12845 ptr := v_0
12846 if v_1.Op != OpAMD64MOVWstore {
12847 break
12848 }
12849 off2 := auxIntToInt32(v_1.AuxInt)
12850 sym2 := auxToSym(v_1.Aux)
12851 x := v_1.Args[1]
12852 ptr2 := v_1.Args[0]
12853 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
12854 break
12855 }
12856 v.reset(OpAMD64MOVWQZX)
12857 v.AddArg(x)
12858 return true
12859 }
12860
12861
12862
12863 for {
12864 off1 := auxIntToInt32(v.AuxInt)
12865 sym := auxToSym(v.Aux)
12866 if v_0.Op != OpAMD64ADDQconst {
12867 break
12868 }
12869 off2 := auxIntToInt32(v_0.AuxInt)
12870 ptr := v_0.Args[0]
12871 mem := v_1
12872 if !(is32Bit(int64(off1) + int64(off2))) {
12873 break
12874 }
12875 v.reset(OpAMD64MOVWload)
12876 v.AuxInt = int32ToAuxInt(off1 + off2)
12877 v.Aux = symToAux(sym)
12878 v.AddArg2(ptr, mem)
12879 return true
12880 }
12881
12882
12883
12884 for {
12885 off1 := auxIntToInt32(v.AuxInt)
12886 sym1 := auxToSym(v.Aux)
12887 if v_0.Op != OpAMD64LEAQ {
12888 break
12889 }
12890 off2 := auxIntToInt32(v_0.AuxInt)
12891 sym2 := auxToSym(v_0.Aux)
12892 base := v_0.Args[0]
12893 mem := v_1
12894 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12895 break
12896 }
12897 v.reset(OpAMD64MOVWload)
12898 v.AuxInt = int32ToAuxInt(off1 + off2)
12899 v.Aux = symToAux(mergeSym(sym1, sym2))
12900 v.AddArg2(base, mem)
12901 return true
12902 }
12903
12904
12905
12906 for {
12907 off := auxIntToInt32(v.AuxInt)
12908 sym := auxToSym(v.Aux)
12909 if v_0.Op != OpSB || !(symIsRO(sym)) {
12910 break
12911 }
12912 v.reset(OpAMD64MOVLconst)
12913 v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
12914 return true
12915 }
12916 return false
12917 }
12918 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
12919 v_2 := v.Args[2]
12920 v_1 := v.Args[1]
12921 v_0 := v.Args[0]
12922
12923
12924 for {
12925 off := auxIntToInt32(v.AuxInt)
12926 sym := auxToSym(v.Aux)
12927 ptr := v_0
12928 if v_1.Op != OpAMD64MOVWQSX {
12929 break
12930 }
12931 x := v_1.Args[0]
12932 mem := v_2
12933 v.reset(OpAMD64MOVWstore)
12934 v.AuxInt = int32ToAuxInt(off)
12935 v.Aux = symToAux(sym)
12936 v.AddArg3(ptr, x, mem)
12937 return true
12938 }
12939
12940
12941 for {
12942 off := auxIntToInt32(v.AuxInt)
12943 sym := auxToSym(v.Aux)
12944 ptr := v_0
12945 if v_1.Op != OpAMD64MOVWQZX {
12946 break
12947 }
12948 x := v_1.Args[0]
12949 mem := v_2
12950 v.reset(OpAMD64MOVWstore)
12951 v.AuxInt = int32ToAuxInt(off)
12952 v.Aux = symToAux(sym)
12953 v.AddArg3(ptr, x, mem)
12954 return true
12955 }
12956
12957
12958
12959 for {
12960 off1 := auxIntToInt32(v.AuxInt)
12961 sym := auxToSym(v.Aux)
12962 if v_0.Op != OpAMD64ADDQconst {
12963 break
12964 }
12965 off2 := auxIntToInt32(v_0.AuxInt)
12966 ptr := v_0.Args[0]
12967 val := v_1
12968 mem := v_2
12969 if !(is32Bit(int64(off1) + int64(off2))) {
12970 break
12971 }
12972 v.reset(OpAMD64MOVWstore)
12973 v.AuxInt = int32ToAuxInt(off1 + off2)
12974 v.Aux = symToAux(sym)
12975 v.AddArg3(ptr, val, mem)
12976 return true
12977 }
12978
12979
12980 for {
12981 off := auxIntToInt32(v.AuxInt)
12982 sym := auxToSym(v.Aux)
12983 ptr := v_0
12984 if v_1.Op != OpAMD64MOVLconst {
12985 break
12986 }
12987 c := auxIntToInt32(v_1.AuxInt)
12988 mem := v_2
12989 v.reset(OpAMD64MOVWstoreconst)
12990 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
12991 v.Aux = symToAux(sym)
12992 v.AddArg2(ptr, mem)
12993 return true
12994 }
12995
12996
12997 for {
12998 off := auxIntToInt32(v.AuxInt)
12999 sym := auxToSym(v.Aux)
13000 ptr := v_0
13001 if v_1.Op != OpAMD64MOVQconst {
13002 break
13003 }
13004 c := auxIntToInt64(v_1.AuxInt)
13005 mem := v_2
13006 v.reset(OpAMD64MOVWstoreconst)
13007 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
13008 v.Aux = symToAux(sym)
13009 v.AddArg2(ptr, mem)
13010 return true
13011 }
13012
13013
13014
13015 for {
13016 off1 := auxIntToInt32(v.AuxInt)
13017 sym1 := auxToSym(v.Aux)
13018 if v_0.Op != OpAMD64LEAQ {
13019 break
13020 }
13021 off2 := auxIntToInt32(v_0.AuxInt)
13022 sym2 := auxToSym(v_0.Aux)
13023 base := v_0.Args[0]
13024 val := v_1
13025 mem := v_2
13026 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13027 break
13028 }
13029 v.reset(OpAMD64MOVWstore)
13030 v.AuxInt = int32ToAuxInt(off1 + off2)
13031 v.Aux = symToAux(mergeSym(sym1, sym2))
13032 v.AddArg3(base, val, mem)
13033 return true
13034 }
13035
13036
13037
13038 for {
13039 i := auxIntToInt32(v.AuxInt)
13040 s := auxToSym(v.Aux)
13041 p := v_0
13042 x := v_1
13043 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
13044 break
13045 }
13046 w := x.Args[0]
13047 mem := v_2
13048 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
13049 break
13050 }
13051 v.reset(OpAMD64MOVBEWstore)
13052 v.AuxInt = int32ToAuxInt(i)
13053 v.Aux = symToAux(s)
13054 v.AddArg3(p, w, mem)
13055 return true
13056 }
13057 return false
13058 }
13059 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
13060 v_1 := v.Args[1]
13061 v_0 := v.Args[0]
13062
13063
13064
13065 for {
13066 sc := auxIntToValAndOff(v.AuxInt)
13067 s := auxToSym(v.Aux)
13068 if v_0.Op != OpAMD64ADDQconst {
13069 break
13070 }
13071 off := auxIntToInt32(v_0.AuxInt)
13072 ptr := v_0.Args[0]
13073 mem := v_1
13074 if !(ValAndOff(sc).canAdd32(off)) {
13075 break
13076 }
13077 v.reset(OpAMD64MOVWstoreconst)
13078 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13079 v.Aux = symToAux(s)
13080 v.AddArg2(ptr, mem)
13081 return true
13082 }
13083
13084
13085
13086 for {
13087 sc := auxIntToValAndOff(v.AuxInt)
13088 sym1 := auxToSym(v.Aux)
13089 if v_0.Op != OpAMD64LEAQ {
13090 break
13091 }
13092 off := auxIntToInt32(v_0.AuxInt)
13093 sym2 := auxToSym(v_0.Aux)
13094 ptr := v_0.Args[0]
13095 mem := v_1
13096 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
13097 break
13098 }
13099 v.reset(OpAMD64MOVWstoreconst)
13100 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13101 v.Aux = symToAux(mergeSym(sym1, sym2))
13102 v.AddArg2(ptr, mem)
13103 return true
13104 }
13105 return false
13106 }
13107 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
13108 v_1 := v.Args[1]
13109 v_0 := v.Args[0]
13110
13111
13112 for {
13113 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13114 x := v_0
13115 if v_1.Op != OpAMD64MOVLconst {
13116 continue
13117 }
13118 c := auxIntToInt32(v_1.AuxInt)
13119 v.reset(OpAMD64MULLconst)
13120 v.AuxInt = int32ToAuxInt(c)
13121 v.AddArg(x)
13122 return true
13123 }
13124 break
13125 }
13126 return false
13127 }
13128 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
13129 v_0 := v.Args[0]
13130 b := v.Block
13131
13132
13133 for {
13134 c := auxIntToInt32(v.AuxInt)
13135 if v_0.Op != OpAMD64MULLconst {
13136 break
13137 }
13138 d := auxIntToInt32(v_0.AuxInt)
13139 x := v_0.Args[0]
13140 v.reset(OpAMD64MULLconst)
13141 v.AuxInt = int32ToAuxInt(c * d)
13142 v.AddArg(x)
13143 return true
13144 }
13145
13146
13147 for {
13148 if auxIntToInt32(v.AuxInt) != -9 {
13149 break
13150 }
13151 x := v_0
13152 v.reset(OpAMD64NEGL)
13153 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13154 v0.AddArg2(x, x)
13155 v.AddArg(v0)
13156 return true
13157 }
13158
13159
13160 for {
13161 if auxIntToInt32(v.AuxInt) != -5 {
13162 break
13163 }
13164 x := v_0
13165 v.reset(OpAMD64NEGL)
13166 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13167 v0.AddArg2(x, x)
13168 v.AddArg(v0)
13169 return true
13170 }
13171
13172
13173 for {
13174 if auxIntToInt32(v.AuxInt) != -3 {
13175 break
13176 }
13177 x := v_0
13178 v.reset(OpAMD64NEGL)
13179 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13180 v0.AddArg2(x, x)
13181 v.AddArg(v0)
13182 return true
13183 }
13184
13185
13186 for {
13187 if auxIntToInt32(v.AuxInt) != -1 {
13188 break
13189 }
13190 x := v_0
13191 v.reset(OpAMD64NEGL)
13192 v.AddArg(x)
13193 return true
13194 }
13195
13196
13197 for {
13198 if auxIntToInt32(v.AuxInt) != 0 {
13199 break
13200 }
13201 v.reset(OpAMD64MOVLconst)
13202 v.AuxInt = int32ToAuxInt(0)
13203 return true
13204 }
13205
13206
13207 for {
13208 if auxIntToInt32(v.AuxInt) != 1 {
13209 break
13210 }
13211 x := v_0
13212 v.copyOf(x)
13213 return true
13214 }
13215
13216
13217 for {
13218 if auxIntToInt32(v.AuxInt) != 3 {
13219 break
13220 }
13221 x := v_0
13222 v.reset(OpAMD64LEAL2)
13223 v.AddArg2(x, x)
13224 return true
13225 }
13226
13227
13228 for {
13229 if auxIntToInt32(v.AuxInt) != 5 {
13230 break
13231 }
13232 x := v_0
13233 v.reset(OpAMD64LEAL4)
13234 v.AddArg2(x, x)
13235 return true
13236 }
13237
13238
13239 for {
13240 if auxIntToInt32(v.AuxInt) != 7 {
13241 break
13242 }
13243 x := v_0
13244 v.reset(OpAMD64LEAL2)
13245 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13246 v0.AddArg2(x, x)
13247 v.AddArg2(x, v0)
13248 return true
13249 }
13250
13251
13252 for {
13253 if auxIntToInt32(v.AuxInt) != 9 {
13254 break
13255 }
13256 x := v_0
13257 v.reset(OpAMD64LEAL8)
13258 v.AddArg2(x, x)
13259 return true
13260 }
13261
13262
13263 for {
13264 if auxIntToInt32(v.AuxInt) != 11 {
13265 break
13266 }
13267 x := v_0
13268 v.reset(OpAMD64LEAL2)
13269 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13270 v0.AddArg2(x, x)
13271 v.AddArg2(x, v0)
13272 return true
13273 }
13274
13275
13276 for {
13277 if auxIntToInt32(v.AuxInt) != 13 {
13278 break
13279 }
13280 x := v_0
13281 v.reset(OpAMD64LEAL4)
13282 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13283 v0.AddArg2(x, x)
13284 v.AddArg2(x, v0)
13285 return true
13286 }
13287
13288
13289 for {
13290 if auxIntToInt32(v.AuxInt) != 19 {
13291 break
13292 }
13293 x := v_0
13294 v.reset(OpAMD64LEAL2)
13295 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13296 v0.AddArg2(x, x)
13297 v.AddArg2(x, v0)
13298 return true
13299 }
13300
13301
13302 for {
13303 if auxIntToInt32(v.AuxInt) != 21 {
13304 break
13305 }
13306 x := v_0
13307 v.reset(OpAMD64LEAL4)
13308 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13309 v0.AddArg2(x, x)
13310 v.AddArg2(x, v0)
13311 return true
13312 }
13313
13314
13315 for {
13316 if auxIntToInt32(v.AuxInt) != 25 {
13317 break
13318 }
13319 x := v_0
13320 v.reset(OpAMD64LEAL8)
13321 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13322 v0.AddArg2(x, x)
13323 v.AddArg2(x, v0)
13324 return true
13325 }
13326
13327
13328 for {
13329 if auxIntToInt32(v.AuxInt) != 27 {
13330 break
13331 }
13332 x := v_0
13333 v.reset(OpAMD64LEAL8)
13334 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13335 v0.AddArg2(x, x)
13336 v.AddArg2(v0, v0)
13337 return true
13338 }
13339
13340
13341 for {
13342 if auxIntToInt32(v.AuxInt) != 37 {
13343 break
13344 }
13345 x := v_0
13346 v.reset(OpAMD64LEAL4)
13347 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13348 v0.AddArg2(x, x)
13349 v.AddArg2(x, v0)
13350 return true
13351 }
13352
13353
13354 for {
13355 if auxIntToInt32(v.AuxInt) != 41 {
13356 break
13357 }
13358 x := v_0
13359 v.reset(OpAMD64LEAL8)
13360 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13361 v0.AddArg2(x, x)
13362 v.AddArg2(x, v0)
13363 return true
13364 }
13365
13366
13367 for {
13368 if auxIntToInt32(v.AuxInt) != 45 {
13369 break
13370 }
13371 x := v_0
13372 v.reset(OpAMD64LEAL8)
13373 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13374 v0.AddArg2(x, x)
13375 v.AddArg2(v0, v0)
13376 return true
13377 }
13378
13379
13380 for {
13381 if auxIntToInt32(v.AuxInt) != 73 {
13382 break
13383 }
13384 x := v_0
13385 v.reset(OpAMD64LEAL8)
13386 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13387 v0.AddArg2(x, x)
13388 v.AddArg2(x, v0)
13389 return true
13390 }
13391
13392
13393 for {
13394 if auxIntToInt32(v.AuxInt) != 81 {
13395 break
13396 }
13397 x := v_0
13398 v.reset(OpAMD64LEAL8)
13399 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13400 v0.AddArg2(x, x)
13401 v.AddArg2(v0, v0)
13402 return true
13403 }
13404
13405
13406
13407 for {
13408 c := auxIntToInt32(v.AuxInt)
13409 x := v_0
13410 if !(isPowerOfTwo(int64(c)+1) && c >= 15) {
13411 break
13412 }
13413 v.reset(OpAMD64SUBL)
13414 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13415 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
13416 v0.AddArg(x)
13417 v.AddArg2(v0, x)
13418 return true
13419 }
13420
13421
13422
13423 for {
13424 c := auxIntToInt32(v.AuxInt)
13425 x := v_0
13426 if !(isPowerOfTwo(c-1) && c >= 17) {
13427 break
13428 }
13429 v.reset(OpAMD64LEAL1)
13430 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13431 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
13432 v0.AddArg(x)
13433 v.AddArg2(v0, x)
13434 return true
13435 }
13436
13437
13438
13439 for {
13440 c := auxIntToInt32(v.AuxInt)
13441 x := v_0
13442 if !(isPowerOfTwo(c-2) && c >= 34) {
13443 break
13444 }
13445 v.reset(OpAMD64LEAL2)
13446 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13447 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
13448 v0.AddArg(x)
13449 v.AddArg2(v0, x)
13450 return true
13451 }
13452
13453
13454
13455 for {
13456 c := auxIntToInt32(v.AuxInt)
13457 x := v_0
13458 if !(isPowerOfTwo(c-4) && c >= 68) {
13459 break
13460 }
13461 v.reset(OpAMD64LEAL4)
13462 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13463 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
13464 v0.AddArg(x)
13465 v.AddArg2(v0, x)
13466 return true
13467 }
13468
13469
13470
13471 for {
13472 c := auxIntToInt32(v.AuxInt)
13473 x := v_0
13474 if !(isPowerOfTwo(c-8) && c >= 136) {
13475 break
13476 }
13477 v.reset(OpAMD64LEAL8)
13478 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13479 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
13480 v0.AddArg(x)
13481 v.AddArg2(v0, x)
13482 return true
13483 }
13484
13485
13486
13487 for {
13488 c := auxIntToInt32(v.AuxInt)
13489 x := v_0
13490 if !(c%3 == 0 && isPowerOfTwo(c/3)) {
13491 break
13492 }
13493 v.reset(OpAMD64SHLLconst)
13494 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
13495 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13496 v0.AddArg2(x, x)
13497 v.AddArg(v0)
13498 return true
13499 }
13500
13501
13502
13503 for {
13504 c := auxIntToInt32(v.AuxInt)
13505 x := v_0
13506 if !(c%5 == 0 && isPowerOfTwo(c/5)) {
13507 break
13508 }
13509 v.reset(OpAMD64SHLLconst)
13510 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
13511 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13512 v0.AddArg2(x, x)
13513 v.AddArg(v0)
13514 return true
13515 }
13516
13517
13518
13519 for {
13520 c := auxIntToInt32(v.AuxInt)
13521 x := v_0
13522 if !(c%9 == 0 && isPowerOfTwo(c/9)) {
13523 break
13524 }
13525 v.reset(OpAMD64SHLLconst)
13526 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
13527 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13528 v0.AddArg2(x, x)
13529 v.AddArg(v0)
13530 return true
13531 }
13532
13533
13534 for {
13535 c := auxIntToInt32(v.AuxInt)
13536 if v_0.Op != OpAMD64MOVLconst {
13537 break
13538 }
13539 d := auxIntToInt32(v_0.AuxInt)
13540 v.reset(OpAMD64MOVLconst)
13541 v.AuxInt = int32ToAuxInt(c * d)
13542 return true
13543 }
13544 return false
13545 }
13546 func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
13547 v_1 := v.Args[1]
13548 v_0 := v.Args[0]
13549
13550
13551
13552 for {
13553 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13554 x := v_0
13555 if v_1.Op != OpAMD64MOVQconst {
13556 continue
13557 }
13558 c := auxIntToInt64(v_1.AuxInt)
13559 if !(is32Bit(c)) {
13560 continue
13561 }
13562 v.reset(OpAMD64MULQconst)
13563 v.AuxInt = int32ToAuxInt(int32(c))
13564 v.AddArg(x)
13565 return true
13566 }
13567 break
13568 }
13569 return false
13570 }
13571 func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
13572 v_0 := v.Args[0]
13573 b := v.Block
13574
13575
13576
13577 for {
13578 c := auxIntToInt32(v.AuxInt)
13579 if v_0.Op != OpAMD64MULQconst {
13580 break
13581 }
13582 d := auxIntToInt32(v_0.AuxInt)
13583 x := v_0.Args[0]
13584 if !(is32Bit(int64(c) * int64(d))) {
13585 break
13586 }
13587 v.reset(OpAMD64MULQconst)
13588 v.AuxInt = int32ToAuxInt(c * d)
13589 v.AddArg(x)
13590 return true
13591 }
13592
13593
13594 for {
13595 if auxIntToInt32(v.AuxInt) != -9 {
13596 break
13597 }
13598 x := v_0
13599 v.reset(OpAMD64NEGQ)
13600 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13601 v0.AddArg2(x, x)
13602 v.AddArg(v0)
13603 return true
13604 }
13605
13606
13607 for {
13608 if auxIntToInt32(v.AuxInt) != -5 {
13609 break
13610 }
13611 x := v_0
13612 v.reset(OpAMD64NEGQ)
13613 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13614 v0.AddArg2(x, x)
13615 v.AddArg(v0)
13616 return true
13617 }
13618
13619
13620 for {
13621 if auxIntToInt32(v.AuxInt) != -3 {
13622 break
13623 }
13624 x := v_0
13625 v.reset(OpAMD64NEGQ)
13626 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13627 v0.AddArg2(x, x)
13628 v.AddArg(v0)
13629 return true
13630 }
13631
13632
13633 for {
13634 if auxIntToInt32(v.AuxInt) != -1 {
13635 break
13636 }
13637 x := v_0
13638 v.reset(OpAMD64NEGQ)
13639 v.AddArg(x)
13640 return true
13641 }
13642
13643
13644 for {
13645 if auxIntToInt32(v.AuxInt) != 0 {
13646 break
13647 }
13648 v.reset(OpAMD64MOVQconst)
13649 v.AuxInt = int64ToAuxInt(0)
13650 return true
13651 }
13652
13653
13654 for {
13655 if auxIntToInt32(v.AuxInt) != 1 {
13656 break
13657 }
13658 x := v_0
13659 v.copyOf(x)
13660 return true
13661 }
13662
13663
13664 for {
13665 if auxIntToInt32(v.AuxInt) != 3 {
13666 break
13667 }
13668 x := v_0
13669 v.reset(OpAMD64LEAQ2)
13670 v.AddArg2(x, x)
13671 return true
13672 }
13673
13674
13675 for {
13676 if auxIntToInt32(v.AuxInt) != 5 {
13677 break
13678 }
13679 x := v_0
13680 v.reset(OpAMD64LEAQ4)
13681 v.AddArg2(x, x)
13682 return true
13683 }
13684
13685
13686 for {
13687 if auxIntToInt32(v.AuxInt) != 7 {
13688 break
13689 }
13690 x := v_0
13691 v.reset(OpAMD64LEAQ2)
13692 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13693 v0.AddArg2(x, x)
13694 v.AddArg2(x, v0)
13695 return true
13696 }
13697
13698
13699 for {
13700 if auxIntToInt32(v.AuxInt) != 9 {
13701 break
13702 }
13703 x := v_0
13704 v.reset(OpAMD64LEAQ8)
13705 v.AddArg2(x, x)
13706 return true
13707 }
13708
13709
13710 for {
13711 if auxIntToInt32(v.AuxInt) != 11 {
13712 break
13713 }
13714 x := v_0
13715 v.reset(OpAMD64LEAQ2)
13716 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13717 v0.AddArg2(x, x)
13718 v.AddArg2(x, v0)
13719 return true
13720 }
13721
13722
13723 for {
13724 if auxIntToInt32(v.AuxInt) != 13 {
13725 break
13726 }
13727 x := v_0
13728 v.reset(OpAMD64LEAQ4)
13729 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13730 v0.AddArg2(x, x)
13731 v.AddArg2(x, v0)
13732 return true
13733 }
13734
13735
13736 for {
13737 if auxIntToInt32(v.AuxInt) != 19 {
13738 break
13739 }
13740 x := v_0
13741 v.reset(OpAMD64LEAQ2)
13742 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13743 v0.AddArg2(x, x)
13744 v.AddArg2(x, v0)
13745 return true
13746 }
13747
13748
13749 for {
13750 if auxIntToInt32(v.AuxInt) != 21 {
13751 break
13752 }
13753 x := v_0
13754 v.reset(OpAMD64LEAQ4)
13755 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13756 v0.AddArg2(x, x)
13757 v.AddArg2(x, v0)
13758 return true
13759 }
13760
13761
13762 for {
13763 if auxIntToInt32(v.AuxInt) != 25 {
13764 break
13765 }
13766 x := v_0
13767 v.reset(OpAMD64LEAQ8)
13768 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13769 v0.AddArg2(x, x)
13770 v.AddArg2(x, v0)
13771 return true
13772 }
13773
13774
13775 for {
13776 if auxIntToInt32(v.AuxInt) != 27 {
13777 break
13778 }
13779 x := v_0
13780 v.reset(OpAMD64LEAQ8)
13781 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13782 v0.AddArg2(x, x)
13783 v.AddArg2(v0, v0)
13784 return true
13785 }
13786
13787
13788 for {
13789 if auxIntToInt32(v.AuxInt) != 37 {
13790 break
13791 }
13792 x := v_0
13793 v.reset(OpAMD64LEAQ4)
13794 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13795 v0.AddArg2(x, x)
13796 v.AddArg2(x, v0)
13797 return true
13798 }
13799
13800
13801 for {
13802 if auxIntToInt32(v.AuxInt) != 41 {
13803 break
13804 }
13805 x := v_0
13806 v.reset(OpAMD64LEAQ8)
13807 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13808 v0.AddArg2(x, x)
13809 v.AddArg2(x, v0)
13810 return true
13811 }
13812
13813
13814 for {
13815 if auxIntToInt32(v.AuxInt) != 45 {
13816 break
13817 }
13818 x := v_0
13819 v.reset(OpAMD64LEAQ8)
13820 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13821 v0.AddArg2(x, x)
13822 v.AddArg2(v0, v0)
13823 return true
13824 }
13825
13826
13827 for {
13828 if auxIntToInt32(v.AuxInt) != 73 {
13829 break
13830 }
13831 x := v_0
13832 v.reset(OpAMD64LEAQ8)
13833 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13834 v0.AddArg2(x, x)
13835 v.AddArg2(x, v0)
13836 return true
13837 }
13838
13839
13840 for {
13841 if auxIntToInt32(v.AuxInt) != 81 {
13842 break
13843 }
13844 x := v_0
13845 v.reset(OpAMD64LEAQ8)
13846 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13847 v0.AddArg2(x, x)
13848 v.AddArg2(v0, v0)
13849 return true
13850 }
13851
13852
13853
13854 for {
13855 c := auxIntToInt32(v.AuxInt)
13856 x := v_0
13857 if !(isPowerOfTwo(int64(c)+1) && c >= 15) {
13858 break
13859 }
13860 v.reset(OpAMD64SUBQ)
13861 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13862 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
13863 v0.AddArg(x)
13864 v.AddArg2(v0, x)
13865 return true
13866 }
13867
13868
13869
13870 for {
13871 c := auxIntToInt32(v.AuxInt)
13872 x := v_0
13873 if !(isPowerOfTwo(c-1) && c >= 17) {
13874 break
13875 }
13876 v.reset(OpAMD64LEAQ1)
13877 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13878 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
13879 v0.AddArg(x)
13880 v.AddArg2(v0, x)
13881 return true
13882 }
13883
13884
13885
13886 for {
13887 c := auxIntToInt32(v.AuxInt)
13888 x := v_0
13889 if !(isPowerOfTwo(c-2) && c >= 34) {
13890 break
13891 }
13892 v.reset(OpAMD64LEAQ2)
13893 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13894 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
13895 v0.AddArg(x)
13896 v.AddArg2(v0, x)
13897 return true
13898 }
13899
13900
13901
13902 for {
13903 c := auxIntToInt32(v.AuxInt)
13904 x := v_0
13905 if !(isPowerOfTwo(c-4) && c >= 68) {
13906 break
13907 }
13908 v.reset(OpAMD64LEAQ4)
13909 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13910 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
13911 v0.AddArg(x)
13912 v.AddArg2(v0, x)
13913 return true
13914 }
13915
13916
13917
13918 for {
13919 c := auxIntToInt32(v.AuxInt)
13920 x := v_0
13921 if !(isPowerOfTwo(c-8) && c >= 136) {
13922 break
13923 }
13924 v.reset(OpAMD64LEAQ8)
13925 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
13926 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
13927 v0.AddArg(x)
13928 v.AddArg2(v0, x)
13929 return true
13930 }
13931
13932
13933
13934 for {
13935 c := auxIntToInt32(v.AuxInt)
13936 x := v_0
13937 if !(c%3 == 0 && isPowerOfTwo(c/3)) {
13938 break
13939 }
13940 v.reset(OpAMD64SHLQconst)
13941 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
13942 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
13943 v0.AddArg2(x, x)
13944 v.AddArg(v0)
13945 return true
13946 }
13947
13948
13949
13950 for {
13951 c := auxIntToInt32(v.AuxInt)
13952 x := v_0
13953 if !(c%5 == 0 && isPowerOfTwo(c/5)) {
13954 break
13955 }
13956 v.reset(OpAMD64SHLQconst)
13957 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
13958 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
13959 v0.AddArg2(x, x)
13960 v.AddArg(v0)
13961 return true
13962 }
13963
13964
13965
13966 for {
13967 c := auxIntToInt32(v.AuxInt)
13968 x := v_0
13969 if !(c%9 == 0 && isPowerOfTwo(c/9)) {
13970 break
13971 }
13972 v.reset(OpAMD64SHLQconst)
13973 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
13974 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
13975 v0.AddArg2(x, x)
13976 v.AddArg(v0)
13977 return true
13978 }
13979
13980
13981 for {
13982 c := auxIntToInt32(v.AuxInt)
13983 if v_0.Op != OpAMD64MOVQconst {
13984 break
13985 }
13986 d := auxIntToInt64(v_0.AuxInt)
13987 v.reset(OpAMD64MOVQconst)
13988 v.AuxInt = int64ToAuxInt(int64(c) * d)
13989 return true
13990 }
13991
13992
13993
13994 for {
13995 c := auxIntToInt32(v.AuxInt)
13996 if v_0.Op != OpAMD64NEGQ {
13997 break
13998 }
13999 x := v_0.Args[0]
14000 if !(c != -(1 << 31)) {
14001 break
14002 }
14003 v.reset(OpAMD64MULQconst)
14004 v.AuxInt = int32ToAuxInt(-c)
14005 v.AddArg(x)
14006 return true
14007 }
14008 return false
14009 }
14010 func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
14011 v_1 := v.Args[1]
14012 v_0 := v.Args[0]
14013
14014
14015
14016 for {
14017 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14018 x := v_0
14019 l := v_1
14020 if l.Op != OpAMD64MOVSDload {
14021 continue
14022 }
14023 off := auxIntToInt32(l.AuxInt)
14024 sym := auxToSym(l.Aux)
14025 mem := l.Args[1]
14026 ptr := l.Args[0]
14027 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14028 continue
14029 }
14030 v.reset(OpAMD64MULSDload)
14031 v.AuxInt = int32ToAuxInt(off)
14032 v.Aux = symToAux(sym)
14033 v.AddArg3(x, ptr, mem)
14034 return true
14035 }
14036 break
14037 }
14038 return false
14039 }
14040 func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
14041 v_2 := v.Args[2]
14042 v_1 := v.Args[1]
14043 v_0 := v.Args[0]
14044 b := v.Block
14045 typ := &b.Func.Config.Types
14046
14047
14048
14049 for {
14050 off1 := auxIntToInt32(v.AuxInt)
14051 sym := auxToSym(v.Aux)
14052 val := v_0
14053 if v_1.Op != OpAMD64ADDQconst {
14054 break
14055 }
14056 off2 := auxIntToInt32(v_1.AuxInt)
14057 base := v_1.Args[0]
14058 mem := v_2
14059 if !(is32Bit(int64(off1) + int64(off2))) {
14060 break
14061 }
14062 v.reset(OpAMD64MULSDload)
14063 v.AuxInt = int32ToAuxInt(off1 + off2)
14064 v.Aux = symToAux(sym)
14065 v.AddArg3(val, base, mem)
14066 return true
14067 }
14068
14069
14070
14071 for {
14072 off1 := auxIntToInt32(v.AuxInt)
14073 sym1 := auxToSym(v.Aux)
14074 val := v_0
14075 if v_1.Op != OpAMD64LEAQ {
14076 break
14077 }
14078 off2 := auxIntToInt32(v_1.AuxInt)
14079 sym2 := auxToSym(v_1.Aux)
14080 base := v_1.Args[0]
14081 mem := v_2
14082 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14083 break
14084 }
14085 v.reset(OpAMD64MULSDload)
14086 v.AuxInt = int32ToAuxInt(off1 + off2)
14087 v.Aux = symToAux(mergeSym(sym1, sym2))
14088 v.AddArg3(val, base, mem)
14089 return true
14090 }
14091
14092
14093 for {
14094 off := auxIntToInt32(v.AuxInt)
14095 sym := auxToSym(v.Aux)
14096 x := v_0
14097 ptr := v_1
14098 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14099 break
14100 }
14101 y := v_2.Args[1]
14102 if ptr != v_2.Args[0] {
14103 break
14104 }
14105 v.reset(OpAMD64MULSD)
14106 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
14107 v0.AddArg(y)
14108 v.AddArg2(x, v0)
14109 return true
14110 }
14111 return false
14112 }
14113 func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
14114 v_1 := v.Args[1]
14115 v_0 := v.Args[0]
14116
14117
14118
14119 for {
14120 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14121 x := v_0
14122 l := v_1
14123 if l.Op != OpAMD64MOVSSload {
14124 continue
14125 }
14126 off := auxIntToInt32(l.AuxInt)
14127 sym := auxToSym(l.Aux)
14128 mem := l.Args[1]
14129 ptr := l.Args[0]
14130 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14131 continue
14132 }
14133 v.reset(OpAMD64MULSSload)
14134 v.AuxInt = int32ToAuxInt(off)
14135 v.Aux = symToAux(sym)
14136 v.AddArg3(x, ptr, mem)
14137 return true
14138 }
14139 break
14140 }
14141 return false
14142 }
14143 func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
14144 v_2 := v.Args[2]
14145 v_1 := v.Args[1]
14146 v_0 := v.Args[0]
14147 b := v.Block
14148 typ := &b.Func.Config.Types
14149
14150
14151
14152 for {
14153 off1 := auxIntToInt32(v.AuxInt)
14154 sym := auxToSym(v.Aux)
14155 val := v_0
14156 if v_1.Op != OpAMD64ADDQconst {
14157 break
14158 }
14159 off2 := auxIntToInt32(v_1.AuxInt)
14160 base := v_1.Args[0]
14161 mem := v_2
14162 if !(is32Bit(int64(off1) + int64(off2))) {
14163 break
14164 }
14165 v.reset(OpAMD64MULSSload)
14166 v.AuxInt = int32ToAuxInt(off1 + off2)
14167 v.Aux = symToAux(sym)
14168 v.AddArg3(val, base, mem)
14169 return true
14170 }
14171
14172
14173
14174 for {
14175 off1 := auxIntToInt32(v.AuxInt)
14176 sym1 := auxToSym(v.Aux)
14177 val := v_0
14178 if v_1.Op != OpAMD64LEAQ {
14179 break
14180 }
14181 off2 := auxIntToInt32(v_1.AuxInt)
14182 sym2 := auxToSym(v_1.Aux)
14183 base := v_1.Args[0]
14184 mem := v_2
14185 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14186 break
14187 }
14188 v.reset(OpAMD64MULSSload)
14189 v.AuxInt = int32ToAuxInt(off1 + off2)
14190 v.Aux = symToAux(mergeSym(sym1, sym2))
14191 v.AddArg3(val, base, mem)
14192 return true
14193 }
14194
14195
14196 for {
14197 off := auxIntToInt32(v.AuxInt)
14198 sym := auxToSym(v.Aux)
14199 x := v_0
14200 ptr := v_1
14201 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14202 break
14203 }
14204 y := v_2.Args[1]
14205 if ptr != v_2.Args[0] {
14206 break
14207 }
14208 v.reset(OpAMD64MULSS)
14209 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
14210 v0.AddArg(y)
14211 v.AddArg2(x, v0)
14212 return true
14213 }
14214 return false
14215 }
14216 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
14217 v_0 := v.Args[0]
14218
14219
14220 for {
14221 if v_0.Op != OpAMD64NEGL {
14222 break
14223 }
14224 x := v_0.Args[0]
14225 v.copyOf(x)
14226 return true
14227 }
14228
14229
14230
14231 for {
14232 s := v_0
14233 if s.Op != OpAMD64SUBL {
14234 break
14235 }
14236 y := s.Args[1]
14237 x := s.Args[0]
14238 if !(s.Uses == 1) {
14239 break
14240 }
14241 v.reset(OpAMD64SUBL)
14242 v.AddArg2(y, x)
14243 return true
14244 }
14245
14246
14247 for {
14248 if v_0.Op != OpAMD64MOVLconst {
14249 break
14250 }
14251 c := auxIntToInt32(v_0.AuxInt)
14252 v.reset(OpAMD64MOVLconst)
14253 v.AuxInt = int32ToAuxInt(-c)
14254 return true
14255 }
14256 return false
14257 }
14258 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
14259 v_0 := v.Args[0]
14260
14261
14262 for {
14263 if v_0.Op != OpAMD64NEGQ {
14264 break
14265 }
14266 x := v_0.Args[0]
14267 v.copyOf(x)
14268 return true
14269 }
14270
14271
14272
14273 for {
14274 s := v_0
14275 if s.Op != OpAMD64SUBQ {
14276 break
14277 }
14278 y := s.Args[1]
14279 x := s.Args[0]
14280 if !(s.Uses == 1) {
14281 break
14282 }
14283 v.reset(OpAMD64SUBQ)
14284 v.AddArg2(y, x)
14285 return true
14286 }
14287
14288
14289 for {
14290 if v_0.Op != OpAMD64MOVQconst {
14291 break
14292 }
14293 c := auxIntToInt64(v_0.AuxInt)
14294 v.reset(OpAMD64MOVQconst)
14295 v.AuxInt = int64ToAuxInt(-c)
14296 return true
14297 }
14298
14299
14300
14301 for {
14302 if v_0.Op != OpAMD64ADDQconst {
14303 break
14304 }
14305 c := auxIntToInt32(v_0.AuxInt)
14306 v_0_0 := v_0.Args[0]
14307 if v_0_0.Op != OpAMD64NEGQ {
14308 break
14309 }
14310 x := v_0_0.Args[0]
14311 if !(c != -(1 << 31)) {
14312 break
14313 }
14314 v.reset(OpAMD64ADDQconst)
14315 v.AuxInt = int32ToAuxInt(-c)
14316 v.AddArg(x)
14317 return true
14318 }
14319 return false
14320 }
14321 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
14322 v_0 := v.Args[0]
14323
14324
14325 for {
14326 if v_0.Op != OpAMD64MOVLconst {
14327 break
14328 }
14329 c := auxIntToInt32(v_0.AuxInt)
14330 v.reset(OpAMD64MOVLconst)
14331 v.AuxInt = int32ToAuxInt(^c)
14332 return true
14333 }
14334 return false
14335 }
14336 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
14337 v_0 := v.Args[0]
14338
14339
14340 for {
14341 if v_0.Op != OpAMD64MOVQconst {
14342 break
14343 }
14344 c := auxIntToInt64(v_0.AuxInt)
14345 v.reset(OpAMD64MOVQconst)
14346 v.AuxInt = int64ToAuxInt(^c)
14347 return true
14348 }
14349 return false
14350 }
14351 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
14352 v_1 := v.Args[1]
14353 v_0 := v.Args[0]
14354
14355
14356 for {
14357 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14358 if v_0.Op != OpAMD64SHLL {
14359 continue
14360 }
14361 y := v_0.Args[1]
14362 v_0_0 := v_0.Args[0]
14363 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
14364 continue
14365 }
14366 x := v_1
14367 v.reset(OpAMD64BTSL)
14368 v.AddArg2(x, y)
14369 return true
14370 }
14371 break
14372 }
14373
14374
14375 for {
14376 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14377 x := v_0
14378 if v_1.Op != OpAMD64MOVLconst {
14379 continue
14380 }
14381 c := auxIntToInt32(v_1.AuxInt)
14382 v.reset(OpAMD64ORLconst)
14383 v.AuxInt = int32ToAuxInt(c)
14384 v.AddArg(x)
14385 return true
14386 }
14387 break
14388 }
14389
14390
14391 for {
14392 x := v_0
14393 if x != v_1 {
14394 break
14395 }
14396 v.copyOf(x)
14397 return true
14398 }
14399
14400
14401
14402 for {
14403 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14404 x := v_0
14405 l := v_1
14406 if l.Op != OpAMD64MOVLload {
14407 continue
14408 }
14409 off := auxIntToInt32(l.AuxInt)
14410 sym := auxToSym(l.Aux)
14411 mem := l.Args[1]
14412 ptr := l.Args[0]
14413 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14414 continue
14415 }
14416 v.reset(OpAMD64ORLload)
14417 v.AuxInt = int32ToAuxInt(off)
14418 v.Aux = symToAux(sym)
14419 v.AddArg3(x, ptr, mem)
14420 return true
14421 }
14422 break
14423 }
14424 return false
14425 }
14426 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
14427 v_0 := v.Args[0]
14428
14429
14430 for {
14431 c := auxIntToInt32(v.AuxInt)
14432 if v_0.Op != OpAMD64ORLconst {
14433 break
14434 }
14435 d := auxIntToInt32(v_0.AuxInt)
14436 x := v_0.Args[0]
14437 v.reset(OpAMD64ORLconst)
14438 v.AuxInt = int32ToAuxInt(c | d)
14439 v.AddArg(x)
14440 return true
14441 }
14442
14443
14444
14445 for {
14446 c := auxIntToInt32(v.AuxInt)
14447 x := v_0
14448 if !(c == 0) {
14449 break
14450 }
14451 v.copyOf(x)
14452 return true
14453 }
14454
14455
14456
14457 for {
14458 c := auxIntToInt32(v.AuxInt)
14459 if !(c == -1) {
14460 break
14461 }
14462 v.reset(OpAMD64MOVLconst)
14463 v.AuxInt = int32ToAuxInt(-1)
14464 return true
14465 }
14466
14467
14468 for {
14469 c := auxIntToInt32(v.AuxInt)
14470 if v_0.Op != OpAMD64MOVLconst {
14471 break
14472 }
14473 d := auxIntToInt32(v_0.AuxInt)
14474 v.reset(OpAMD64MOVLconst)
14475 v.AuxInt = int32ToAuxInt(c | d)
14476 return true
14477 }
14478 return false
14479 }
14480 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
14481 v_1 := v.Args[1]
14482 v_0 := v.Args[0]
14483
14484
14485
14486 for {
14487 valoff1 := auxIntToValAndOff(v.AuxInt)
14488 sym := auxToSym(v.Aux)
14489 if v_0.Op != OpAMD64ADDQconst {
14490 break
14491 }
14492 off2 := auxIntToInt32(v_0.AuxInt)
14493 base := v_0.Args[0]
14494 mem := v_1
14495 if !(ValAndOff(valoff1).canAdd32(off2)) {
14496 break
14497 }
14498 v.reset(OpAMD64ORLconstmodify)
14499 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14500 v.Aux = symToAux(sym)
14501 v.AddArg2(base, mem)
14502 return true
14503 }
14504
14505
14506
14507 for {
14508 valoff1 := auxIntToValAndOff(v.AuxInt)
14509 sym1 := auxToSym(v.Aux)
14510 if v_0.Op != OpAMD64LEAQ {
14511 break
14512 }
14513 off2 := auxIntToInt32(v_0.AuxInt)
14514 sym2 := auxToSym(v_0.Aux)
14515 base := v_0.Args[0]
14516 mem := v_1
14517 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14518 break
14519 }
14520 v.reset(OpAMD64ORLconstmodify)
14521 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14522 v.Aux = symToAux(mergeSym(sym1, sym2))
14523 v.AddArg2(base, mem)
14524 return true
14525 }
14526 return false
14527 }
14528 func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
14529 v_2 := v.Args[2]
14530 v_1 := v.Args[1]
14531 v_0 := v.Args[0]
14532 b := v.Block
14533 typ := &b.Func.Config.Types
14534
14535
14536
14537 for {
14538 off1 := auxIntToInt32(v.AuxInt)
14539 sym := auxToSym(v.Aux)
14540 val := v_0
14541 if v_1.Op != OpAMD64ADDQconst {
14542 break
14543 }
14544 off2 := auxIntToInt32(v_1.AuxInt)
14545 base := v_1.Args[0]
14546 mem := v_2
14547 if !(is32Bit(int64(off1) + int64(off2))) {
14548 break
14549 }
14550 v.reset(OpAMD64ORLload)
14551 v.AuxInt = int32ToAuxInt(off1 + off2)
14552 v.Aux = symToAux(sym)
14553 v.AddArg3(val, base, mem)
14554 return true
14555 }
14556
14557
14558
14559 for {
14560 off1 := auxIntToInt32(v.AuxInt)
14561 sym1 := auxToSym(v.Aux)
14562 val := v_0
14563 if v_1.Op != OpAMD64LEAQ {
14564 break
14565 }
14566 off2 := auxIntToInt32(v_1.AuxInt)
14567 sym2 := auxToSym(v_1.Aux)
14568 base := v_1.Args[0]
14569 mem := v_2
14570 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14571 break
14572 }
14573 v.reset(OpAMD64ORLload)
14574 v.AuxInt = int32ToAuxInt(off1 + off2)
14575 v.Aux = symToAux(mergeSym(sym1, sym2))
14576 v.AddArg3(val, base, mem)
14577 return true
14578 }
14579
14580
14581 for {
14582 off := auxIntToInt32(v.AuxInt)
14583 sym := auxToSym(v.Aux)
14584 x := v_0
14585 ptr := v_1
14586 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14587 break
14588 }
14589 y := v_2.Args[1]
14590 if ptr != v_2.Args[0] {
14591 break
14592 }
14593 v.reset(OpAMD64ORL)
14594 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
14595 v0.AddArg(y)
14596 v.AddArg2(x, v0)
14597 return true
14598 }
14599 return false
14600 }
14601 func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
14602 v_2 := v.Args[2]
14603 v_1 := v.Args[1]
14604 v_0 := v.Args[0]
14605
14606
14607
14608 for {
14609 off1 := auxIntToInt32(v.AuxInt)
14610 sym := auxToSym(v.Aux)
14611 if v_0.Op != OpAMD64ADDQconst {
14612 break
14613 }
14614 off2 := auxIntToInt32(v_0.AuxInt)
14615 base := v_0.Args[0]
14616 val := v_1
14617 mem := v_2
14618 if !(is32Bit(int64(off1) + int64(off2))) {
14619 break
14620 }
14621 v.reset(OpAMD64ORLmodify)
14622 v.AuxInt = int32ToAuxInt(off1 + off2)
14623 v.Aux = symToAux(sym)
14624 v.AddArg3(base, val, mem)
14625 return true
14626 }
14627
14628
14629
14630 for {
14631 off1 := auxIntToInt32(v.AuxInt)
14632 sym1 := auxToSym(v.Aux)
14633 if v_0.Op != OpAMD64LEAQ {
14634 break
14635 }
14636 off2 := auxIntToInt32(v_0.AuxInt)
14637 sym2 := auxToSym(v_0.Aux)
14638 base := v_0.Args[0]
14639 val := v_1
14640 mem := v_2
14641 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14642 break
14643 }
14644 v.reset(OpAMD64ORLmodify)
14645 v.AuxInt = int32ToAuxInt(off1 + off2)
14646 v.Aux = symToAux(mergeSym(sym1, sym2))
14647 v.AddArg3(base, val, mem)
14648 return true
14649 }
14650 return false
14651 }
14652 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
14653 v_1 := v.Args[1]
14654 v_0 := v.Args[0]
14655
14656
14657 for {
14658 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14659 if v_0.Op != OpAMD64SHLQ {
14660 continue
14661 }
14662 y := v_0.Args[1]
14663 v_0_0 := v_0.Args[0]
14664 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
14665 continue
14666 }
14667 x := v_1
14668 v.reset(OpAMD64BTSQ)
14669 v.AddArg2(x, y)
14670 return true
14671 }
14672 break
14673 }
14674
14675
14676
14677 for {
14678 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14679 if v_0.Op != OpAMD64MOVQconst {
14680 continue
14681 }
14682 c := auxIntToInt64(v_0.AuxInt)
14683 x := v_1
14684 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
14685 continue
14686 }
14687 v.reset(OpAMD64BTSQconst)
14688 v.AuxInt = int8ToAuxInt(int8(log64(c)))
14689 v.AddArg(x)
14690 return true
14691 }
14692 break
14693 }
14694
14695
14696
14697 for {
14698 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14699 x := v_0
14700 if v_1.Op != OpAMD64MOVQconst {
14701 continue
14702 }
14703 c := auxIntToInt64(v_1.AuxInt)
14704 if !(is32Bit(c)) {
14705 continue
14706 }
14707 v.reset(OpAMD64ORQconst)
14708 v.AuxInt = int32ToAuxInt(int32(c))
14709 v.AddArg(x)
14710 return true
14711 }
14712 break
14713 }
14714
14715
14716 for {
14717 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14718 x := v_0
14719 if v_1.Op != OpAMD64MOVLconst {
14720 continue
14721 }
14722 c := auxIntToInt32(v_1.AuxInt)
14723 v.reset(OpAMD64ORQconst)
14724 v.AuxInt = int32ToAuxInt(c)
14725 v.AddArg(x)
14726 return true
14727 }
14728 break
14729 }
14730
14731
14732 for {
14733 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14734 if v_0.Op != OpAMD64SHRQ {
14735 continue
14736 }
14737 bits := v_0.Args[1]
14738 lo := v_0.Args[0]
14739 if v_1.Op != OpAMD64SHLQ {
14740 continue
14741 }
14742 _ = v_1.Args[1]
14743 hi := v_1.Args[0]
14744 v_1_1 := v_1.Args[1]
14745 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14746 continue
14747 }
14748 v.reset(OpAMD64SHRDQ)
14749 v.AddArg3(lo, hi, bits)
14750 return true
14751 }
14752 break
14753 }
14754
14755
14756 for {
14757 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14758 if v_0.Op != OpAMD64SHLQ {
14759 continue
14760 }
14761 bits := v_0.Args[1]
14762 lo := v_0.Args[0]
14763 if v_1.Op != OpAMD64SHRQ {
14764 continue
14765 }
14766 _ = v_1.Args[1]
14767 hi := v_1.Args[0]
14768 v_1_1 := v_1.Args[1]
14769 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14770 continue
14771 }
14772 v.reset(OpAMD64SHLDQ)
14773 v.AddArg3(lo, hi, bits)
14774 return true
14775 }
14776 break
14777 }
14778
14779
14780 for {
14781 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14782 if v_0.Op != OpAMD64SHRXQ {
14783 continue
14784 }
14785 bits := v_0.Args[1]
14786 lo := v_0.Args[0]
14787 if v_1.Op != OpAMD64SHLXQ {
14788 continue
14789 }
14790 _ = v_1.Args[1]
14791 hi := v_1.Args[0]
14792 v_1_1 := v_1.Args[1]
14793 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14794 continue
14795 }
14796 v.reset(OpAMD64SHRDQ)
14797 v.AddArg3(lo, hi, bits)
14798 return true
14799 }
14800 break
14801 }
14802
14803
14804 for {
14805 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14806 if v_0.Op != OpAMD64SHLXQ {
14807 continue
14808 }
14809 bits := v_0.Args[1]
14810 lo := v_0.Args[0]
14811 if v_1.Op != OpAMD64SHRXQ {
14812 continue
14813 }
14814 _ = v_1.Args[1]
14815 hi := v_1.Args[0]
14816 v_1_1 := v_1.Args[1]
14817 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14818 continue
14819 }
14820 v.reset(OpAMD64SHLDQ)
14821 v.AddArg3(lo, hi, bits)
14822 return true
14823 }
14824 break
14825 }
14826
14827
14828 for {
14829 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14830 if v_0.Op != OpAMD64MOVQconst {
14831 continue
14832 }
14833 c := auxIntToInt64(v_0.AuxInt)
14834 if v_1.Op != OpAMD64MOVQconst {
14835 continue
14836 }
14837 d := auxIntToInt64(v_1.AuxInt)
14838 v.reset(OpAMD64MOVQconst)
14839 v.AuxInt = int64ToAuxInt(c | d)
14840 return true
14841 }
14842 break
14843 }
14844
14845
14846 for {
14847 x := v_0
14848 if x != v_1 {
14849 break
14850 }
14851 v.copyOf(x)
14852 return true
14853 }
14854
14855
14856
14857 for {
14858 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14859 x := v_0
14860 l := v_1
14861 if l.Op != OpAMD64MOVQload {
14862 continue
14863 }
14864 off := auxIntToInt32(l.AuxInt)
14865 sym := auxToSym(l.Aux)
14866 mem := l.Args[1]
14867 ptr := l.Args[0]
14868 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14869 continue
14870 }
14871 v.reset(OpAMD64ORQload)
14872 v.AuxInt = int32ToAuxInt(off)
14873 v.Aux = symToAux(sym)
14874 v.AddArg3(x, ptr, mem)
14875 return true
14876 }
14877 break
14878 }
14879 return false
14880 }
14881 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
14882 v_0 := v.Args[0]
14883
14884
14885 for {
14886 c := auxIntToInt32(v.AuxInt)
14887 if v_0.Op != OpAMD64ORQconst {
14888 break
14889 }
14890 d := auxIntToInt32(v_0.AuxInt)
14891 x := v_0.Args[0]
14892 v.reset(OpAMD64ORQconst)
14893 v.AuxInt = int32ToAuxInt(c | d)
14894 v.AddArg(x)
14895 return true
14896 }
14897
14898
14899 for {
14900 if auxIntToInt32(v.AuxInt) != 0 {
14901 break
14902 }
14903 x := v_0
14904 v.copyOf(x)
14905 return true
14906 }
14907
14908
14909 for {
14910 if auxIntToInt32(v.AuxInt) != -1 {
14911 break
14912 }
14913 v.reset(OpAMD64MOVQconst)
14914 v.AuxInt = int64ToAuxInt(-1)
14915 return true
14916 }
14917
14918
14919 for {
14920 c := auxIntToInt32(v.AuxInt)
14921 if v_0.Op != OpAMD64MOVQconst {
14922 break
14923 }
14924 d := auxIntToInt64(v_0.AuxInt)
14925 v.reset(OpAMD64MOVQconst)
14926 v.AuxInt = int64ToAuxInt(int64(c) | d)
14927 return true
14928 }
14929 return false
14930 }
14931 func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
14932 v_1 := v.Args[1]
14933 v_0 := v.Args[0]
14934
14935
14936
14937 for {
14938 valoff1 := auxIntToValAndOff(v.AuxInt)
14939 sym := auxToSym(v.Aux)
14940 if v_0.Op != OpAMD64ADDQconst {
14941 break
14942 }
14943 off2 := auxIntToInt32(v_0.AuxInt)
14944 base := v_0.Args[0]
14945 mem := v_1
14946 if !(ValAndOff(valoff1).canAdd32(off2)) {
14947 break
14948 }
14949 v.reset(OpAMD64ORQconstmodify)
14950 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14951 v.Aux = symToAux(sym)
14952 v.AddArg2(base, mem)
14953 return true
14954 }
14955
14956
14957
14958 for {
14959 valoff1 := auxIntToValAndOff(v.AuxInt)
14960 sym1 := auxToSym(v.Aux)
14961 if v_0.Op != OpAMD64LEAQ {
14962 break
14963 }
14964 off2 := auxIntToInt32(v_0.AuxInt)
14965 sym2 := auxToSym(v_0.Aux)
14966 base := v_0.Args[0]
14967 mem := v_1
14968 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14969 break
14970 }
14971 v.reset(OpAMD64ORQconstmodify)
14972 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14973 v.Aux = symToAux(mergeSym(sym1, sym2))
14974 v.AddArg2(base, mem)
14975 return true
14976 }
14977 return false
14978 }
14979 func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
14980 v_2 := v.Args[2]
14981 v_1 := v.Args[1]
14982 v_0 := v.Args[0]
14983 b := v.Block
14984 typ := &b.Func.Config.Types
14985
14986
14987
14988 for {
14989 off1 := auxIntToInt32(v.AuxInt)
14990 sym := auxToSym(v.Aux)
14991 val := v_0
14992 if v_1.Op != OpAMD64ADDQconst {
14993 break
14994 }
14995 off2 := auxIntToInt32(v_1.AuxInt)
14996 base := v_1.Args[0]
14997 mem := v_2
14998 if !(is32Bit(int64(off1) + int64(off2))) {
14999 break
15000 }
15001 v.reset(OpAMD64ORQload)
15002 v.AuxInt = int32ToAuxInt(off1 + off2)
15003 v.Aux = symToAux(sym)
15004 v.AddArg3(val, base, mem)
15005 return true
15006 }
15007
15008
15009
15010 for {
15011 off1 := auxIntToInt32(v.AuxInt)
15012 sym1 := auxToSym(v.Aux)
15013 val := v_0
15014 if v_1.Op != OpAMD64LEAQ {
15015 break
15016 }
15017 off2 := auxIntToInt32(v_1.AuxInt)
15018 sym2 := auxToSym(v_1.Aux)
15019 base := v_1.Args[0]
15020 mem := v_2
15021 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15022 break
15023 }
15024 v.reset(OpAMD64ORQload)
15025 v.AuxInt = int32ToAuxInt(off1 + off2)
15026 v.Aux = symToAux(mergeSym(sym1, sym2))
15027 v.AddArg3(val, base, mem)
15028 return true
15029 }
15030
15031
15032 for {
15033 off := auxIntToInt32(v.AuxInt)
15034 sym := auxToSym(v.Aux)
15035 x := v_0
15036 ptr := v_1
15037 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
15038 break
15039 }
15040 y := v_2.Args[1]
15041 if ptr != v_2.Args[0] {
15042 break
15043 }
15044 v.reset(OpAMD64ORQ)
15045 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
15046 v0.AddArg(y)
15047 v.AddArg2(x, v0)
15048 return true
15049 }
15050 return false
15051 }
15052 func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
15053 v_2 := v.Args[2]
15054 v_1 := v.Args[1]
15055 v_0 := v.Args[0]
15056
15057
15058
15059 for {
15060 off1 := auxIntToInt32(v.AuxInt)
15061 sym := auxToSym(v.Aux)
15062 if v_0.Op != OpAMD64ADDQconst {
15063 break
15064 }
15065 off2 := auxIntToInt32(v_0.AuxInt)
15066 base := v_0.Args[0]
15067 val := v_1
15068 mem := v_2
15069 if !(is32Bit(int64(off1) + int64(off2))) {
15070 break
15071 }
15072 v.reset(OpAMD64ORQmodify)
15073 v.AuxInt = int32ToAuxInt(off1 + off2)
15074 v.Aux = symToAux(sym)
15075 v.AddArg3(base, val, mem)
15076 return true
15077 }
15078
15079
15080
15081 for {
15082 off1 := auxIntToInt32(v.AuxInt)
15083 sym1 := auxToSym(v.Aux)
15084 if v_0.Op != OpAMD64LEAQ {
15085 break
15086 }
15087 off2 := auxIntToInt32(v_0.AuxInt)
15088 sym2 := auxToSym(v_0.Aux)
15089 base := v_0.Args[0]
15090 val := v_1
15091 mem := v_2
15092 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15093 break
15094 }
15095 v.reset(OpAMD64ORQmodify)
15096 v.AuxInt = int32ToAuxInt(off1 + off2)
15097 v.Aux = symToAux(mergeSym(sym1, sym2))
15098 v.AddArg3(base, val, mem)
15099 return true
15100 }
15101 return false
15102 }
15103 func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
15104 v_1 := v.Args[1]
15105 v_0 := v.Args[0]
15106
15107
15108 for {
15109 x := v_0
15110 if v_1.Op != OpAMD64NEGQ {
15111 break
15112 }
15113 y := v_1.Args[0]
15114 v.reset(OpAMD64RORB)
15115 v.AddArg2(x, y)
15116 return true
15117 }
15118
15119
15120 for {
15121 x := v_0
15122 if v_1.Op != OpAMD64NEGL {
15123 break
15124 }
15125 y := v_1.Args[0]
15126 v.reset(OpAMD64RORB)
15127 v.AddArg2(x, y)
15128 return true
15129 }
15130
15131
15132 for {
15133 x := v_0
15134 if v_1.Op != OpAMD64MOVQconst {
15135 break
15136 }
15137 c := auxIntToInt64(v_1.AuxInt)
15138 v.reset(OpAMD64ROLBconst)
15139 v.AuxInt = int8ToAuxInt(int8(c & 7))
15140 v.AddArg(x)
15141 return true
15142 }
15143
15144
15145 for {
15146 x := v_0
15147 if v_1.Op != OpAMD64MOVLconst {
15148 break
15149 }
15150 c := auxIntToInt32(v_1.AuxInt)
15151 v.reset(OpAMD64ROLBconst)
15152 v.AuxInt = int8ToAuxInt(int8(c & 7))
15153 v.AddArg(x)
15154 return true
15155 }
15156 return false
15157 }
15158 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
15159 v_0 := v.Args[0]
15160
15161
15162 for {
15163 if auxIntToInt8(v.AuxInt) != 0 {
15164 break
15165 }
15166 x := v_0
15167 v.copyOf(x)
15168 return true
15169 }
15170 return false
15171 }
15172 func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
15173 v_1 := v.Args[1]
15174 v_0 := v.Args[0]
15175
15176
15177 for {
15178 x := v_0
15179 if v_1.Op != OpAMD64NEGQ {
15180 break
15181 }
15182 y := v_1.Args[0]
15183 v.reset(OpAMD64RORL)
15184 v.AddArg2(x, y)
15185 return true
15186 }
15187
15188
15189 for {
15190 x := v_0
15191 if v_1.Op != OpAMD64NEGL {
15192 break
15193 }
15194 y := v_1.Args[0]
15195 v.reset(OpAMD64RORL)
15196 v.AddArg2(x, y)
15197 return true
15198 }
15199
15200
15201 for {
15202 x := v_0
15203 if v_1.Op != OpAMD64MOVQconst {
15204 break
15205 }
15206 c := auxIntToInt64(v_1.AuxInt)
15207 v.reset(OpAMD64ROLLconst)
15208 v.AuxInt = int8ToAuxInt(int8(c & 31))
15209 v.AddArg(x)
15210 return true
15211 }
15212
15213
15214 for {
15215 x := v_0
15216 if v_1.Op != OpAMD64MOVLconst {
15217 break
15218 }
15219 c := auxIntToInt32(v_1.AuxInt)
15220 v.reset(OpAMD64ROLLconst)
15221 v.AuxInt = int8ToAuxInt(int8(c & 31))
15222 v.AddArg(x)
15223 return true
15224 }
15225 return false
15226 }
15227 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
15228 v_0 := v.Args[0]
15229
15230
15231 for {
15232 if auxIntToInt8(v.AuxInt) != 0 {
15233 break
15234 }
15235 x := v_0
15236 v.copyOf(x)
15237 return true
15238 }
15239 return false
15240 }
15241 func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
15242 v_1 := v.Args[1]
15243 v_0 := v.Args[0]
15244
15245
15246 for {
15247 x := v_0
15248 if v_1.Op != OpAMD64NEGQ {
15249 break
15250 }
15251 y := v_1.Args[0]
15252 v.reset(OpAMD64RORQ)
15253 v.AddArg2(x, y)
15254 return true
15255 }
15256
15257
15258 for {
15259 x := v_0
15260 if v_1.Op != OpAMD64NEGL {
15261 break
15262 }
15263 y := v_1.Args[0]
15264 v.reset(OpAMD64RORQ)
15265 v.AddArg2(x, y)
15266 return true
15267 }
15268
15269
15270 for {
15271 x := v_0
15272 if v_1.Op != OpAMD64MOVQconst {
15273 break
15274 }
15275 c := auxIntToInt64(v_1.AuxInt)
15276 v.reset(OpAMD64ROLQconst)
15277 v.AuxInt = int8ToAuxInt(int8(c & 63))
15278 v.AddArg(x)
15279 return true
15280 }
15281
15282
15283 for {
15284 x := v_0
15285 if v_1.Op != OpAMD64MOVLconst {
15286 break
15287 }
15288 c := auxIntToInt32(v_1.AuxInt)
15289 v.reset(OpAMD64ROLQconst)
15290 v.AuxInt = int8ToAuxInt(int8(c & 63))
15291 v.AddArg(x)
15292 return true
15293 }
15294 return false
15295 }
15296 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
15297 v_0 := v.Args[0]
15298
15299
15300 for {
15301 if auxIntToInt8(v.AuxInt) != 0 {
15302 break
15303 }
15304 x := v_0
15305 v.copyOf(x)
15306 return true
15307 }
15308 return false
15309 }
15310 func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
15311 v_1 := v.Args[1]
15312 v_0 := v.Args[0]
15313
15314
15315 for {
15316 x := v_0
15317 if v_1.Op != OpAMD64NEGQ {
15318 break
15319 }
15320 y := v_1.Args[0]
15321 v.reset(OpAMD64RORW)
15322 v.AddArg2(x, y)
15323 return true
15324 }
15325
15326
15327 for {
15328 x := v_0
15329 if v_1.Op != OpAMD64NEGL {
15330 break
15331 }
15332 y := v_1.Args[0]
15333 v.reset(OpAMD64RORW)
15334 v.AddArg2(x, y)
15335 return true
15336 }
15337
15338
15339 for {
15340 x := v_0
15341 if v_1.Op != OpAMD64MOVQconst {
15342 break
15343 }
15344 c := auxIntToInt64(v_1.AuxInt)
15345 v.reset(OpAMD64ROLWconst)
15346 v.AuxInt = int8ToAuxInt(int8(c & 15))
15347 v.AddArg(x)
15348 return true
15349 }
15350
15351
15352 for {
15353 x := v_0
15354 if v_1.Op != OpAMD64MOVLconst {
15355 break
15356 }
15357 c := auxIntToInt32(v_1.AuxInt)
15358 v.reset(OpAMD64ROLWconst)
15359 v.AuxInt = int8ToAuxInt(int8(c & 15))
15360 v.AddArg(x)
15361 return true
15362 }
15363 return false
15364 }
15365 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
15366 v_0 := v.Args[0]
15367
15368
15369 for {
15370 if auxIntToInt8(v.AuxInt) != 0 {
15371 break
15372 }
15373 x := v_0
15374 v.copyOf(x)
15375 return true
15376 }
15377 return false
15378 }
15379 func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
15380 v_1 := v.Args[1]
15381 v_0 := v.Args[0]
15382
15383
15384 for {
15385 x := v_0
15386 if v_1.Op != OpAMD64NEGQ {
15387 break
15388 }
15389 y := v_1.Args[0]
15390 v.reset(OpAMD64ROLB)
15391 v.AddArg2(x, y)
15392 return true
15393 }
15394
15395
15396 for {
15397 x := v_0
15398 if v_1.Op != OpAMD64NEGL {
15399 break
15400 }
15401 y := v_1.Args[0]
15402 v.reset(OpAMD64ROLB)
15403 v.AddArg2(x, y)
15404 return true
15405 }
15406
15407
15408 for {
15409 x := v_0
15410 if v_1.Op != OpAMD64MOVQconst {
15411 break
15412 }
15413 c := auxIntToInt64(v_1.AuxInt)
15414 v.reset(OpAMD64ROLBconst)
15415 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15416 v.AddArg(x)
15417 return true
15418 }
15419
15420
15421 for {
15422 x := v_0
15423 if v_1.Op != OpAMD64MOVLconst {
15424 break
15425 }
15426 c := auxIntToInt32(v_1.AuxInt)
15427 v.reset(OpAMD64ROLBconst)
15428 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15429 v.AddArg(x)
15430 return true
15431 }
15432 return false
15433 }
15434 func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
15435 v_1 := v.Args[1]
15436 v_0 := v.Args[0]
15437
15438
15439 for {
15440 x := v_0
15441 if v_1.Op != OpAMD64NEGQ {
15442 break
15443 }
15444 y := v_1.Args[0]
15445 v.reset(OpAMD64ROLL)
15446 v.AddArg2(x, y)
15447 return true
15448 }
15449
15450
15451 for {
15452 x := v_0
15453 if v_1.Op != OpAMD64NEGL {
15454 break
15455 }
15456 y := v_1.Args[0]
15457 v.reset(OpAMD64ROLL)
15458 v.AddArg2(x, y)
15459 return true
15460 }
15461
15462
15463 for {
15464 x := v_0
15465 if v_1.Op != OpAMD64MOVQconst {
15466 break
15467 }
15468 c := auxIntToInt64(v_1.AuxInt)
15469 v.reset(OpAMD64ROLLconst)
15470 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15471 v.AddArg(x)
15472 return true
15473 }
15474
15475
15476 for {
15477 x := v_0
15478 if v_1.Op != OpAMD64MOVLconst {
15479 break
15480 }
15481 c := auxIntToInt32(v_1.AuxInt)
15482 v.reset(OpAMD64ROLLconst)
15483 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15484 v.AddArg(x)
15485 return true
15486 }
15487 return false
15488 }
15489 func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
15490 v_1 := v.Args[1]
15491 v_0 := v.Args[0]
15492
15493
15494 for {
15495 x := v_0
15496 if v_1.Op != OpAMD64NEGQ {
15497 break
15498 }
15499 y := v_1.Args[0]
15500 v.reset(OpAMD64ROLQ)
15501 v.AddArg2(x, y)
15502 return true
15503 }
15504
15505
15506 for {
15507 x := v_0
15508 if v_1.Op != OpAMD64NEGL {
15509 break
15510 }
15511 y := v_1.Args[0]
15512 v.reset(OpAMD64ROLQ)
15513 v.AddArg2(x, y)
15514 return true
15515 }
15516
15517
15518 for {
15519 x := v_0
15520 if v_1.Op != OpAMD64MOVQconst {
15521 break
15522 }
15523 c := auxIntToInt64(v_1.AuxInt)
15524 v.reset(OpAMD64ROLQconst)
15525 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15526 v.AddArg(x)
15527 return true
15528 }
15529
15530
15531 for {
15532 x := v_0
15533 if v_1.Op != OpAMD64MOVLconst {
15534 break
15535 }
15536 c := auxIntToInt32(v_1.AuxInt)
15537 v.reset(OpAMD64ROLQconst)
15538 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15539 v.AddArg(x)
15540 return true
15541 }
15542 return false
15543 }
15544 func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
15545 v_1 := v.Args[1]
15546 v_0 := v.Args[0]
15547
15548
15549 for {
15550 x := v_0
15551 if v_1.Op != OpAMD64NEGQ {
15552 break
15553 }
15554 y := v_1.Args[0]
15555 v.reset(OpAMD64ROLW)
15556 v.AddArg2(x, y)
15557 return true
15558 }
15559
15560
15561 for {
15562 x := v_0
15563 if v_1.Op != OpAMD64NEGL {
15564 break
15565 }
15566 y := v_1.Args[0]
15567 v.reset(OpAMD64ROLW)
15568 v.AddArg2(x, y)
15569 return true
15570 }
15571
15572
15573 for {
15574 x := v_0
15575 if v_1.Op != OpAMD64MOVQconst {
15576 break
15577 }
15578 c := auxIntToInt64(v_1.AuxInt)
15579 v.reset(OpAMD64ROLWconst)
15580 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15581 v.AddArg(x)
15582 return true
15583 }
15584
15585
15586 for {
15587 x := v_0
15588 if v_1.Op != OpAMD64MOVLconst {
15589 break
15590 }
15591 c := auxIntToInt32(v_1.AuxInt)
15592 v.reset(OpAMD64ROLWconst)
15593 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15594 v.AddArg(x)
15595 return true
15596 }
15597 return false
15598 }
15599 func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
15600 v_1 := v.Args[1]
15601 v_0 := v.Args[0]
15602
15603
15604 for {
15605 x := v_0
15606 if v_1.Op != OpAMD64MOVQconst {
15607 break
15608 }
15609 c := auxIntToInt64(v_1.AuxInt)
15610 v.reset(OpAMD64SARBconst)
15611 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15612 v.AddArg(x)
15613 return true
15614 }
15615
15616
15617 for {
15618 x := v_0
15619 if v_1.Op != OpAMD64MOVLconst {
15620 break
15621 }
15622 c := auxIntToInt32(v_1.AuxInt)
15623 v.reset(OpAMD64SARBconst)
15624 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15625 v.AddArg(x)
15626 return true
15627 }
15628 return false
15629 }
15630 func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
15631 v_0 := v.Args[0]
15632
15633
15634 for {
15635 if auxIntToInt8(v.AuxInt) != 0 {
15636 break
15637 }
15638 x := v_0
15639 v.copyOf(x)
15640 return true
15641 }
15642
15643
15644 for {
15645 c := auxIntToInt8(v.AuxInt)
15646 if v_0.Op != OpAMD64MOVQconst {
15647 break
15648 }
15649 d := auxIntToInt64(v_0.AuxInt)
15650 v.reset(OpAMD64MOVQconst)
15651 v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
15652 return true
15653 }
15654 return false
15655 }
15656 func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
15657 v_1 := v.Args[1]
15658 v_0 := v.Args[0]
15659 b := v.Block
15660
15661
15662 for {
15663 x := v_0
15664 if v_1.Op != OpAMD64MOVQconst {
15665 break
15666 }
15667 c := auxIntToInt64(v_1.AuxInt)
15668 v.reset(OpAMD64SARLconst)
15669 v.AuxInt = int8ToAuxInt(int8(c & 31))
15670 v.AddArg(x)
15671 return true
15672 }
15673
15674
15675 for {
15676 x := v_0
15677 if v_1.Op != OpAMD64MOVLconst {
15678 break
15679 }
15680 c := auxIntToInt32(v_1.AuxInt)
15681 v.reset(OpAMD64SARLconst)
15682 v.AuxInt = int8ToAuxInt(int8(c & 31))
15683 v.AddArg(x)
15684 return true
15685 }
15686
15687
15688
15689 for {
15690 x := v_0
15691 if v_1.Op != OpAMD64ADDQconst {
15692 break
15693 }
15694 c := auxIntToInt32(v_1.AuxInt)
15695 y := v_1.Args[0]
15696 if !(c&31 == 0) {
15697 break
15698 }
15699 v.reset(OpAMD64SARL)
15700 v.AddArg2(x, y)
15701 return true
15702 }
15703
15704
15705
15706 for {
15707 x := v_0
15708 if v_1.Op != OpAMD64NEGQ {
15709 break
15710 }
15711 t := v_1.Type
15712 v_1_0 := v_1.Args[0]
15713 if v_1_0.Op != OpAMD64ADDQconst {
15714 break
15715 }
15716 c := auxIntToInt32(v_1_0.AuxInt)
15717 y := v_1_0.Args[0]
15718 if !(c&31 == 0) {
15719 break
15720 }
15721 v.reset(OpAMD64SARL)
15722 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15723 v0.AddArg(y)
15724 v.AddArg2(x, v0)
15725 return true
15726 }
15727
15728
15729
15730 for {
15731 x := v_0
15732 if v_1.Op != OpAMD64ANDQconst {
15733 break
15734 }
15735 c := auxIntToInt32(v_1.AuxInt)
15736 y := v_1.Args[0]
15737 if !(c&31 == 31) {
15738 break
15739 }
15740 v.reset(OpAMD64SARL)
15741 v.AddArg2(x, y)
15742 return true
15743 }
15744
15745
15746
15747 for {
15748 x := v_0
15749 if v_1.Op != OpAMD64NEGQ {
15750 break
15751 }
15752 t := v_1.Type
15753 v_1_0 := v_1.Args[0]
15754 if v_1_0.Op != OpAMD64ANDQconst {
15755 break
15756 }
15757 c := auxIntToInt32(v_1_0.AuxInt)
15758 y := v_1_0.Args[0]
15759 if !(c&31 == 31) {
15760 break
15761 }
15762 v.reset(OpAMD64SARL)
15763 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15764 v0.AddArg(y)
15765 v.AddArg2(x, v0)
15766 return true
15767 }
15768
15769
15770
15771 for {
15772 x := v_0
15773 if v_1.Op != OpAMD64ADDLconst {
15774 break
15775 }
15776 c := auxIntToInt32(v_1.AuxInt)
15777 y := v_1.Args[0]
15778 if !(c&31 == 0) {
15779 break
15780 }
15781 v.reset(OpAMD64SARL)
15782 v.AddArg2(x, y)
15783 return true
15784 }
15785
15786
15787
15788 for {
15789 x := v_0
15790 if v_1.Op != OpAMD64NEGL {
15791 break
15792 }
15793 t := v_1.Type
15794 v_1_0 := v_1.Args[0]
15795 if v_1_0.Op != OpAMD64ADDLconst {
15796 break
15797 }
15798 c := auxIntToInt32(v_1_0.AuxInt)
15799 y := v_1_0.Args[0]
15800 if !(c&31 == 0) {
15801 break
15802 }
15803 v.reset(OpAMD64SARL)
15804 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15805 v0.AddArg(y)
15806 v.AddArg2(x, v0)
15807 return true
15808 }
15809
15810
15811
15812 for {
15813 x := v_0
15814 if v_1.Op != OpAMD64ANDLconst {
15815 break
15816 }
15817 c := auxIntToInt32(v_1.AuxInt)
15818 y := v_1.Args[0]
15819 if !(c&31 == 31) {
15820 break
15821 }
15822 v.reset(OpAMD64SARL)
15823 v.AddArg2(x, y)
15824 return true
15825 }
15826
15827
15828
15829 for {
15830 x := v_0
15831 if v_1.Op != OpAMD64NEGL {
15832 break
15833 }
15834 t := v_1.Type
15835 v_1_0 := v_1.Args[0]
15836 if v_1_0.Op != OpAMD64ANDLconst {
15837 break
15838 }
15839 c := auxIntToInt32(v_1_0.AuxInt)
15840 y := v_1_0.Args[0]
15841 if !(c&31 == 31) {
15842 break
15843 }
15844 v.reset(OpAMD64SARL)
15845 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15846 v0.AddArg(y)
15847 v.AddArg2(x, v0)
15848 return true
15849 }
15850
15851
15852
15853 for {
15854 l := v_0
15855 if l.Op != OpAMD64MOVLload {
15856 break
15857 }
15858 off := auxIntToInt32(l.AuxInt)
15859 sym := auxToSym(l.Aux)
15860 mem := l.Args[1]
15861 ptr := l.Args[0]
15862 x := v_1
15863 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
15864 break
15865 }
15866 v.reset(OpAMD64SARXLload)
15867 v.AuxInt = int32ToAuxInt(off)
15868 v.Aux = symToAux(sym)
15869 v.AddArg3(ptr, x, mem)
15870 return true
15871 }
15872 return false
15873 }
15874 func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
15875 v_0 := v.Args[0]
15876
15877
15878 for {
15879 if auxIntToInt8(v.AuxInt) != 0 {
15880 break
15881 }
15882 x := v_0
15883 v.copyOf(x)
15884 return true
15885 }
15886
15887
15888 for {
15889 c := auxIntToInt8(v.AuxInt)
15890 if v_0.Op != OpAMD64MOVQconst {
15891 break
15892 }
15893 d := auxIntToInt64(v_0.AuxInt)
15894 v.reset(OpAMD64MOVQconst)
15895 v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
15896 return true
15897 }
15898 return false
15899 }
15900 func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
15901 v_1 := v.Args[1]
15902 v_0 := v.Args[0]
15903 b := v.Block
15904
15905
15906 for {
15907 x := v_0
15908 if v_1.Op != OpAMD64MOVQconst {
15909 break
15910 }
15911 c := auxIntToInt64(v_1.AuxInt)
15912 v.reset(OpAMD64SARQconst)
15913 v.AuxInt = int8ToAuxInt(int8(c & 63))
15914 v.AddArg(x)
15915 return true
15916 }
15917
15918
15919 for {
15920 x := v_0
15921 if v_1.Op != OpAMD64MOVLconst {
15922 break
15923 }
15924 c := auxIntToInt32(v_1.AuxInt)
15925 v.reset(OpAMD64SARQconst)
15926 v.AuxInt = int8ToAuxInt(int8(c & 63))
15927 v.AddArg(x)
15928 return true
15929 }
15930
15931
15932
15933 for {
15934 x := v_0
15935 if v_1.Op != OpAMD64ADDQconst {
15936 break
15937 }
15938 c := auxIntToInt32(v_1.AuxInt)
15939 y := v_1.Args[0]
15940 if !(c&63 == 0) {
15941 break
15942 }
15943 v.reset(OpAMD64SARQ)
15944 v.AddArg2(x, y)
15945 return true
15946 }
15947
15948
15949
15950 for {
15951 x := v_0
15952 if v_1.Op != OpAMD64NEGQ {
15953 break
15954 }
15955 t := v_1.Type
15956 v_1_0 := v_1.Args[0]
15957 if v_1_0.Op != OpAMD64ADDQconst {
15958 break
15959 }
15960 c := auxIntToInt32(v_1_0.AuxInt)
15961 y := v_1_0.Args[0]
15962 if !(c&63 == 0) {
15963 break
15964 }
15965 v.reset(OpAMD64SARQ)
15966 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15967 v0.AddArg(y)
15968 v.AddArg2(x, v0)
15969 return true
15970 }
15971
15972
15973
15974 for {
15975 x := v_0
15976 if v_1.Op != OpAMD64ANDQconst {
15977 break
15978 }
15979 c := auxIntToInt32(v_1.AuxInt)
15980 y := v_1.Args[0]
15981 if !(c&63 == 63) {
15982 break
15983 }
15984 v.reset(OpAMD64SARQ)
15985 v.AddArg2(x, y)
15986 return true
15987 }
15988
15989
15990
15991 for {
15992 x := v_0
15993 if v_1.Op != OpAMD64NEGQ {
15994 break
15995 }
15996 t := v_1.Type
15997 v_1_0 := v_1.Args[0]
15998 if v_1_0.Op != OpAMD64ANDQconst {
15999 break
16000 }
16001 c := auxIntToInt32(v_1_0.AuxInt)
16002 y := v_1_0.Args[0]
16003 if !(c&63 == 63) {
16004 break
16005 }
16006 v.reset(OpAMD64SARQ)
16007 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
16008 v0.AddArg(y)
16009 v.AddArg2(x, v0)
16010 return true
16011 }
16012
16013
16014
16015 for {
16016 x := v_0
16017 if v_1.Op != OpAMD64ADDLconst {
16018 break
16019 }
16020 c := auxIntToInt32(v_1.AuxInt)
16021 y := v_1.Args[0]
16022 if !(c&63 == 0) {
16023 break
16024 }
16025 v.reset(OpAMD64SARQ)
16026 v.AddArg2(x, y)
16027 return true
16028 }
16029
16030
16031
16032 for {
16033 x := v_0
16034 if v_1.Op != OpAMD64NEGL {
16035 break
16036 }
16037 t := v_1.Type
16038 v_1_0 := v_1.Args[0]
16039 if v_1_0.Op != OpAMD64ADDLconst {
16040 break
16041 }
16042 c := auxIntToInt32(v_1_0.AuxInt)
16043 y := v_1_0.Args[0]
16044 if !(c&63 == 0) {
16045 break
16046 }
16047 v.reset(OpAMD64SARQ)
16048 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
16049 v0.AddArg(y)
16050 v.AddArg2(x, v0)
16051 return true
16052 }
16053
16054
16055
16056 for {
16057 x := v_0
16058 if v_1.Op != OpAMD64ANDLconst {
16059 break
16060 }
16061 c := auxIntToInt32(v_1.AuxInt)
16062 y := v_1.Args[0]
16063 if !(c&63 == 63) {
16064 break
16065 }
16066 v.reset(OpAMD64SARQ)
16067 v.AddArg2(x, y)
16068 return true
16069 }
16070
16071
16072
16073 for {
16074 x := v_0
16075 if v_1.Op != OpAMD64NEGL {
16076 break
16077 }
16078 t := v_1.Type
16079 v_1_0 := v_1.Args[0]
16080 if v_1_0.Op != OpAMD64ANDLconst {
16081 break
16082 }
16083 c := auxIntToInt32(v_1_0.AuxInt)
16084 y := v_1_0.Args[0]
16085 if !(c&63 == 63) {
16086 break
16087 }
16088 v.reset(OpAMD64SARQ)
16089 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
16090 v0.AddArg(y)
16091 v.AddArg2(x, v0)
16092 return true
16093 }
16094
16095
16096
16097 for {
16098 l := v_0
16099 if l.Op != OpAMD64MOVQload {
16100 break
16101 }
16102 off := auxIntToInt32(l.AuxInt)
16103 sym := auxToSym(l.Aux)
16104 mem := l.Args[1]
16105 ptr := l.Args[0]
16106 x := v_1
16107 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
16108 break
16109 }
16110 v.reset(OpAMD64SARXQload)
16111 v.AuxInt = int32ToAuxInt(off)
16112 v.Aux = symToAux(sym)
16113 v.AddArg3(ptr, x, mem)
16114 return true
16115 }
16116 return false
16117 }
16118 func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
16119 v_0 := v.Args[0]
16120
16121
16122 for {
16123 if auxIntToInt8(v.AuxInt) != 0 {
16124 break
16125 }
16126 x := v_0
16127 v.copyOf(x)
16128 return true
16129 }
16130
16131
16132 for {
16133 c := auxIntToInt8(v.AuxInt)
16134 if v_0.Op != OpAMD64MOVQconst {
16135 break
16136 }
16137 d := auxIntToInt64(v_0.AuxInt)
16138 v.reset(OpAMD64MOVQconst)
16139 v.AuxInt = int64ToAuxInt(d >> uint64(c))
16140 return true
16141 }
16142 return false
16143 }
16144 func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
16145 v_1 := v.Args[1]
16146 v_0 := v.Args[0]
16147
16148
16149 for {
16150 x := v_0
16151 if v_1.Op != OpAMD64MOVQconst {
16152 break
16153 }
16154 c := auxIntToInt64(v_1.AuxInt)
16155 v.reset(OpAMD64SARWconst)
16156 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
16157 v.AddArg(x)
16158 return true
16159 }
16160
16161
16162 for {
16163 x := v_0
16164 if v_1.Op != OpAMD64MOVLconst {
16165 break
16166 }
16167 c := auxIntToInt32(v_1.AuxInt)
16168 v.reset(OpAMD64SARWconst)
16169 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
16170 v.AddArg(x)
16171 return true
16172 }
16173 return false
16174 }
16175 func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
16176 v_0 := v.Args[0]
16177
16178
16179 for {
16180 if auxIntToInt8(v.AuxInt) != 0 {
16181 break
16182 }
16183 x := v_0
16184 v.copyOf(x)
16185 return true
16186 }
16187
16188
16189 for {
16190 c := auxIntToInt8(v.AuxInt)
16191 if v_0.Op != OpAMD64MOVQconst {
16192 break
16193 }
16194 d := auxIntToInt64(v_0.AuxInt)
16195 v.reset(OpAMD64MOVQconst)
16196 v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
16197 return true
16198 }
16199 return false
16200 }
16201 func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
16202 v_2 := v.Args[2]
16203 v_1 := v.Args[1]
16204 v_0 := v.Args[0]
16205 b := v.Block
16206 typ := &b.Func.Config.Types
16207
16208
16209 for {
16210 off := auxIntToInt32(v.AuxInt)
16211 sym := auxToSym(v.Aux)
16212 ptr := v_0
16213 if v_1.Op != OpAMD64MOVLconst {
16214 break
16215 }
16216 c := auxIntToInt32(v_1.AuxInt)
16217 mem := v_2
16218 v.reset(OpAMD64SARLconst)
16219 v.AuxInt = int8ToAuxInt(int8(c & 31))
16220 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
16221 v0.AuxInt = int32ToAuxInt(off)
16222 v0.Aux = symToAux(sym)
16223 v0.AddArg2(ptr, mem)
16224 v.AddArg(v0)
16225 return true
16226 }
16227 return false
16228 }
16229 func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
16230 v_2 := v.Args[2]
16231 v_1 := v.Args[1]
16232 v_0 := v.Args[0]
16233 b := v.Block
16234 typ := &b.Func.Config.Types
16235
16236
16237 for {
16238 off := auxIntToInt32(v.AuxInt)
16239 sym := auxToSym(v.Aux)
16240 ptr := v_0
16241 if v_1.Op != OpAMD64MOVQconst {
16242 break
16243 }
16244 c := auxIntToInt64(v_1.AuxInt)
16245 mem := v_2
16246 v.reset(OpAMD64SARQconst)
16247 v.AuxInt = int8ToAuxInt(int8(c & 63))
16248 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16249 v0.AuxInt = int32ToAuxInt(off)
16250 v0.Aux = symToAux(sym)
16251 v0.AddArg2(ptr, mem)
16252 v.AddArg(v0)
16253 return true
16254 }
16255
16256
16257 for {
16258 off := auxIntToInt32(v.AuxInt)
16259 sym := auxToSym(v.Aux)
16260 ptr := v_0
16261 if v_1.Op != OpAMD64MOVLconst {
16262 break
16263 }
16264 c := auxIntToInt32(v_1.AuxInt)
16265 mem := v_2
16266 v.reset(OpAMD64SARQconst)
16267 v.AuxInt = int8ToAuxInt(int8(c & 63))
16268 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16269 v0.AuxInt = int32ToAuxInt(off)
16270 v0.Aux = symToAux(sym)
16271 v0.AddArg2(ptr, mem)
16272 v.AddArg(v0)
16273 return true
16274 }
16275 return false
16276 }
16277 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
16278 v_0 := v.Args[0]
16279
16280
16281 for {
16282 if v_0.Op != OpAMD64FlagEQ {
16283 break
16284 }
16285 v.reset(OpAMD64MOVLconst)
16286 v.AuxInt = int32ToAuxInt(0)
16287 return true
16288 }
16289
16290
16291 for {
16292 if v_0.Op != OpAMD64FlagLT_ULT {
16293 break
16294 }
16295 v.reset(OpAMD64MOVLconst)
16296 v.AuxInt = int32ToAuxInt(-1)
16297 return true
16298 }
16299
16300
16301 for {
16302 if v_0.Op != OpAMD64FlagLT_UGT {
16303 break
16304 }
16305 v.reset(OpAMD64MOVLconst)
16306 v.AuxInt = int32ToAuxInt(0)
16307 return true
16308 }
16309
16310
16311 for {
16312 if v_0.Op != OpAMD64FlagGT_ULT {
16313 break
16314 }
16315 v.reset(OpAMD64MOVLconst)
16316 v.AuxInt = int32ToAuxInt(-1)
16317 return true
16318 }
16319
16320
16321 for {
16322 if v_0.Op != OpAMD64FlagGT_UGT {
16323 break
16324 }
16325 v.reset(OpAMD64MOVLconst)
16326 v.AuxInt = int32ToAuxInt(0)
16327 return true
16328 }
16329 return false
16330 }
16331 func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
16332 v_2 := v.Args[2]
16333 v_1 := v.Args[1]
16334 v_0 := v.Args[0]
16335
16336
16337
16338 for {
16339 x := v_0
16340 if v_1.Op != OpAMD64MOVQconst {
16341 break
16342 }
16343 c := auxIntToInt64(v_1.AuxInt)
16344 borrow := v_2
16345 if !(is32Bit(c)) {
16346 break
16347 }
16348 v.reset(OpAMD64SBBQconst)
16349 v.AuxInt = int32ToAuxInt(int32(c))
16350 v.AddArg2(x, borrow)
16351 return true
16352 }
16353
16354
16355 for {
16356 x := v_0
16357 y := v_1
16358 if v_2.Op != OpAMD64FlagEQ {
16359 break
16360 }
16361 v.reset(OpAMD64SUBQborrow)
16362 v.AddArg2(x, y)
16363 return true
16364 }
16365 return false
16366 }
16367 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
16368 v_0 := v.Args[0]
16369
16370
16371 for {
16372 if v_0.Op != OpAMD64FlagEQ {
16373 break
16374 }
16375 v.reset(OpAMD64MOVQconst)
16376 v.AuxInt = int64ToAuxInt(0)
16377 return true
16378 }
16379
16380
16381 for {
16382 if v_0.Op != OpAMD64FlagLT_ULT {
16383 break
16384 }
16385 v.reset(OpAMD64MOVQconst)
16386 v.AuxInt = int64ToAuxInt(-1)
16387 return true
16388 }
16389
16390
16391 for {
16392 if v_0.Op != OpAMD64FlagLT_UGT {
16393 break
16394 }
16395 v.reset(OpAMD64MOVQconst)
16396 v.AuxInt = int64ToAuxInt(0)
16397 return true
16398 }
16399
16400
16401 for {
16402 if v_0.Op != OpAMD64FlagGT_ULT {
16403 break
16404 }
16405 v.reset(OpAMD64MOVQconst)
16406 v.AuxInt = int64ToAuxInt(-1)
16407 return true
16408 }
16409
16410
16411 for {
16412 if v_0.Op != OpAMD64FlagGT_UGT {
16413 break
16414 }
16415 v.reset(OpAMD64MOVQconst)
16416 v.AuxInt = int64ToAuxInt(0)
16417 return true
16418 }
16419 return false
16420 }
16421 func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
16422 v_1 := v.Args[1]
16423 v_0 := v.Args[0]
16424
16425
16426 for {
16427 c := auxIntToInt32(v.AuxInt)
16428 x := v_0
16429 if v_1.Op != OpAMD64FlagEQ {
16430 break
16431 }
16432 v.reset(OpAMD64SUBQconstborrow)
16433 v.AuxInt = int32ToAuxInt(c)
16434 v.AddArg(x)
16435 return true
16436 }
16437 return false
16438 }
16439 func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
16440 v_0 := v.Args[0]
16441
16442
16443 for {
16444 if v_0.Op != OpAMD64InvertFlags {
16445 break
16446 }
16447 x := v_0.Args[0]
16448 v.reset(OpAMD64SETB)
16449 v.AddArg(x)
16450 return true
16451 }
16452
16453
16454 for {
16455 if v_0.Op != OpAMD64FlagEQ {
16456 break
16457 }
16458 v.reset(OpAMD64MOVLconst)
16459 v.AuxInt = int32ToAuxInt(0)
16460 return true
16461 }
16462
16463
16464 for {
16465 if v_0.Op != OpAMD64FlagLT_ULT {
16466 break
16467 }
16468 v.reset(OpAMD64MOVLconst)
16469 v.AuxInt = int32ToAuxInt(0)
16470 return true
16471 }
16472
16473
16474 for {
16475 if v_0.Op != OpAMD64FlagLT_UGT {
16476 break
16477 }
16478 v.reset(OpAMD64MOVLconst)
16479 v.AuxInt = int32ToAuxInt(1)
16480 return true
16481 }
16482
16483
16484 for {
16485 if v_0.Op != OpAMD64FlagGT_ULT {
16486 break
16487 }
16488 v.reset(OpAMD64MOVLconst)
16489 v.AuxInt = int32ToAuxInt(0)
16490 return true
16491 }
16492
16493
16494 for {
16495 if v_0.Op != OpAMD64FlagGT_UGT {
16496 break
16497 }
16498 v.reset(OpAMD64MOVLconst)
16499 v.AuxInt = int32ToAuxInt(1)
16500 return true
16501 }
16502 return false
16503 }
16504 func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
16505 v_0 := v.Args[0]
16506
16507
16508 for {
16509 if v_0.Op != OpAMD64TESTQ {
16510 break
16511 }
16512 x := v_0.Args[1]
16513 if x != v_0.Args[0] {
16514 break
16515 }
16516 v.reset(OpConstBool)
16517 v.AuxInt = boolToAuxInt(true)
16518 return true
16519 }
16520
16521
16522 for {
16523 if v_0.Op != OpAMD64TESTL {
16524 break
16525 }
16526 x := v_0.Args[1]
16527 if x != v_0.Args[0] {
16528 break
16529 }
16530 v.reset(OpConstBool)
16531 v.AuxInt = boolToAuxInt(true)
16532 return true
16533 }
16534
16535
16536 for {
16537 if v_0.Op != OpAMD64TESTW {
16538 break
16539 }
16540 x := v_0.Args[1]
16541 if x != v_0.Args[0] {
16542 break
16543 }
16544 v.reset(OpConstBool)
16545 v.AuxInt = boolToAuxInt(true)
16546 return true
16547 }
16548
16549
16550 for {
16551 if v_0.Op != OpAMD64TESTB {
16552 break
16553 }
16554 x := v_0.Args[1]
16555 if x != v_0.Args[0] {
16556 break
16557 }
16558 v.reset(OpConstBool)
16559 v.AuxInt = boolToAuxInt(true)
16560 return true
16561 }
16562
16563
16564 for {
16565 if v_0.Op != OpAMD64InvertFlags {
16566 break
16567 }
16568 x := v_0.Args[0]
16569 v.reset(OpAMD64SETBE)
16570 v.AddArg(x)
16571 return true
16572 }
16573
16574
16575 for {
16576 if v_0.Op != OpAMD64FlagEQ {
16577 break
16578 }
16579 v.reset(OpAMD64MOVLconst)
16580 v.AuxInt = int32ToAuxInt(1)
16581 return true
16582 }
16583
16584
16585 for {
16586 if v_0.Op != OpAMD64FlagLT_ULT {
16587 break
16588 }
16589 v.reset(OpAMD64MOVLconst)
16590 v.AuxInt = int32ToAuxInt(0)
16591 return true
16592 }
16593
16594
16595 for {
16596 if v_0.Op != OpAMD64FlagLT_UGT {
16597 break
16598 }
16599 v.reset(OpAMD64MOVLconst)
16600 v.AuxInt = int32ToAuxInt(1)
16601 return true
16602 }
16603
16604
16605 for {
16606 if v_0.Op != OpAMD64FlagGT_ULT {
16607 break
16608 }
16609 v.reset(OpAMD64MOVLconst)
16610 v.AuxInt = int32ToAuxInt(0)
16611 return true
16612 }
16613
16614
16615 for {
16616 if v_0.Op != OpAMD64FlagGT_UGT {
16617 break
16618 }
16619 v.reset(OpAMD64MOVLconst)
16620 v.AuxInt = int32ToAuxInt(1)
16621 return true
16622 }
16623 return false
16624 }
16625 func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
16626 v_2 := v.Args[2]
16627 v_1 := v.Args[1]
16628 v_0 := v.Args[0]
16629 b := v.Block
16630 typ := &b.Func.Config.Types
16631
16632
16633 for {
16634 off := auxIntToInt32(v.AuxInt)
16635 sym := auxToSym(v.Aux)
16636 ptr := v_0
16637 if v_1.Op != OpAMD64InvertFlags {
16638 break
16639 }
16640 x := v_1.Args[0]
16641 mem := v_2
16642 v.reset(OpAMD64SETBEstore)
16643 v.AuxInt = int32ToAuxInt(off)
16644 v.Aux = symToAux(sym)
16645 v.AddArg3(ptr, x, mem)
16646 return true
16647 }
16648
16649
16650
16651 for {
16652 off1 := auxIntToInt32(v.AuxInt)
16653 sym := auxToSym(v.Aux)
16654 if v_0.Op != OpAMD64ADDQconst {
16655 break
16656 }
16657 off2 := auxIntToInt32(v_0.AuxInt)
16658 base := v_0.Args[0]
16659 val := v_1
16660 mem := v_2
16661 if !(is32Bit(int64(off1) + int64(off2))) {
16662 break
16663 }
16664 v.reset(OpAMD64SETAEstore)
16665 v.AuxInt = int32ToAuxInt(off1 + off2)
16666 v.Aux = symToAux(sym)
16667 v.AddArg3(base, val, mem)
16668 return true
16669 }
16670
16671
16672
16673 for {
16674 off1 := auxIntToInt32(v.AuxInt)
16675 sym1 := auxToSym(v.Aux)
16676 if v_0.Op != OpAMD64LEAQ {
16677 break
16678 }
16679 off2 := auxIntToInt32(v_0.AuxInt)
16680 sym2 := auxToSym(v_0.Aux)
16681 base := v_0.Args[0]
16682 val := v_1
16683 mem := v_2
16684 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16685 break
16686 }
16687 v.reset(OpAMD64SETAEstore)
16688 v.AuxInt = int32ToAuxInt(off1 + off2)
16689 v.Aux = symToAux(mergeSym(sym1, sym2))
16690 v.AddArg3(base, val, mem)
16691 return true
16692 }
16693
16694
16695 for {
16696 off := auxIntToInt32(v.AuxInt)
16697 sym := auxToSym(v.Aux)
16698 ptr := v_0
16699 if v_1.Op != OpAMD64FlagEQ {
16700 break
16701 }
16702 mem := v_2
16703 v.reset(OpAMD64MOVBstore)
16704 v.AuxInt = int32ToAuxInt(off)
16705 v.Aux = symToAux(sym)
16706 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16707 v0.AuxInt = int32ToAuxInt(1)
16708 v.AddArg3(ptr, v0, mem)
16709 return true
16710 }
16711
16712
16713 for {
16714 off := auxIntToInt32(v.AuxInt)
16715 sym := auxToSym(v.Aux)
16716 ptr := v_0
16717 if v_1.Op != OpAMD64FlagLT_ULT {
16718 break
16719 }
16720 mem := v_2
16721 v.reset(OpAMD64MOVBstore)
16722 v.AuxInt = int32ToAuxInt(off)
16723 v.Aux = symToAux(sym)
16724 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16725 v0.AuxInt = int32ToAuxInt(0)
16726 v.AddArg3(ptr, v0, mem)
16727 return true
16728 }
16729
16730
16731 for {
16732 off := auxIntToInt32(v.AuxInt)
16733 sym := auxToSym(v.Aux)
16734 ptr := v_0
16735 if v_1.Op != OpAMD64FlagLT_UGT {
16736 break
16737 }
16738 mem := v_2
16739 v.reset(OpAMD64MOVBstore)
16740 v.AuxInt = int32ToAuxInt(off)
16741 v.Aux = symToAux(sym)
16742 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16743 v0.AuxInt = int32ToAuxInt(1)
16744 v.AddArg3(ptr, v0, mem)
16745 return true
16746 }
16747
16748
16749 for {
16750 off := auxIntToInt32(v.AuxInt)
16751 sym := auxToSym(v.Aux)
16752 ptr := v_0
16753 if v_1.Op != OpAMD64FlagGT_ULT {
16754 break
16755 }
16756 mem := v_2
16757 v.reset(OpAMD64MOVBstore)
16758 v.AuxInt = int32ToAuxInt(off)
16759 v.Aux = symToAux(sym)
16760 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16761 v0.AuxInt = int32ToAuxInt(0)
16762 v.AddArg3(ptr, v0, mem)
16763 return true
16764 }
16765
16766
16767 for {
16768 off := auxIntToInt32(v.AuxInt)
16769 sym := auxToSym(v.Aux)
16770 ptr := v_0
16771 if v_1.Op != OpAMD64FlagGT_UGT {
16772 break
16773 }
16774 mem := v_2
16775 v.reset(OpAMD64MOVBstore)
16776 v.AuxInt = int32ToAuxInt(off)
16777 v.Aux = symToAux(sym)
16778 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16779 v0.AuxInt = int32ToAuxInt(1)
16780 v.AddArg3(ptr, v0, mem)
16781 return true
16782 }
16783 return false
16784 }
16785 func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
16786 v_2 := v.Args[2]
16787 v_1 := v.Args[1]
16788 v_0 := v.Args[0]
16789 b := v.Block
16790 typ := &b.Func.Config.Types
16791
16792
16793 for {
16794 off := auxIntToInt32(v.AuxInt)
16795 sym := auxToSym(v.Aux)
16796 ptr := v_0
16797 if v_1.Op != OpAMD64InvertFlags {
16798 break
16799 }
16800 x := v_1.Args[0]
16801 mem := v_2
16802 v.reset(OpAMD64SETBstore)
16803 v.AuxInt = int32ToAuxInt(off)
16804 v.Aux = symToAux(sym)
16805 v.AddArg3(ptr, x, mem)
16806 return true
16807 }
16808
16809
16810
16811 for {
16812 off1 := auxIntToInt32(v.AuxInt)
16813 sym := auxToSym(v.Aux)
16814 if v_0.Op != OpAMD64ADDQconst {
16815 break
16816 }
16817 off2 := auxIntToInt32(v_0.AuxInt)
16818 base := v_0.Args[0]
16819 val := v_1
16820 mem := v_2
16821 if !(is32Bit(int64(off1) + int64(off2))) {
16822 break
16823 }
16824 v.reset(OpAMD64SETAstore)
16825 v.AuxInt = int32ToAuxInt(off1 + off2)
16826 v.Aux = symToAux(sym)
16827 v.AddArg3(base, val, mem)
16828 return true
16829 }
16830
16831
16832
16833 for {
16834 off1 := auxIntToInt32(v.AuxInt)
16835 sym1 := auxToSym(v.Aux)
16836 if v_0.Op != OpAMD64LEAQ {
16837 break
16838 }
16839 off2 := auxIntToInt32(v_0.AuxInt)
16840 sym2 := auxToSym(v_0.Aux)
16841 base := v_0.Args[0]
16842 val := v_1
16843 mem := v_2
16844 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16845 break
16846 }
16847 v.reset(OpAMD64SETAstore)
16848 v.AuxInt = int32ToAuxInt(off1 + off2)
16849 v.Aux = symToAux(mergeSym(sym1, sym2))
16850 v.AddArg3(base, val, mem)
16851 return true
16852 }
16853
16854
16855 for {
16856 off := auxIntToInt32(v.AuxInt)
16857 sym := auxToSym(v.Aux)
16858 ptr := v_0
16859 if v_1.Op != OpAMD64FlagEQ {
16860 break
16861 }
16862 mem := v_2
16863 v.reset(OpAMD64MOVBstore)
16864 v.AuxInt = int32ToAuxInt(off)
16865 v.Aux = symToAux(sym)
16866 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16867 v0.AuxInt = int32ToAuxInt(0)
16868 v.AddArg3(ptr, v0, mem)
16869 return true
16870 }
16871
16872
16873 for {
16874 off := auxIntToInt32(v.AuxInt)
16875 sym := auxToSym(v.Aux)
16876 ptr := v_0
16877 if v_1.Op != OpAMD64FlagLT_ULT {
16878 break
16879 }
16880 mem := v_2
16881 v.reset(OpAMD64MOVBstore)
16882 v.AuxInt = int32ToAuxInt(off)
16883 v.Aux = symToAux(sym)
16884 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16885 v0.AuxInt = int32ToAuxInt(0)
16886 v.AddArg3(ptr, v0, mem)
16887 return true
16888 }
16889
16890
16891 for {
16892 off := auxIntToInt32(v.AuxInt)
16893 sym := auxToSym(v.Aux)
16894 ptr := v_0
16895 if v_1.Op != OpAMD64FlagLT_UGT {
16896 break
16897 }
16898 mem := v_2
16899 v.reset(OpAMD64MOVBstore)
16900 v.AuxInt = int32ToAuxInt(off)
16901 v.Aux = symToAux(sym)
16902 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16903 v0.AuxInt = int32ToAuxInt(1)
16904 v.AddArg3(ptr, v0, mem)
16905 return true
16906 }
16907
16908
16909 for {
16910 off := auxIntToInt32(v.AuxInt)
16911 sym := auxToSym(v.Aux)
16912 ptr := v_0
16913 if v_1.Op != OpAMD64FlagGT_ULT {
16914 break
16915 }
16916 mem := v_2
16917 v.reset(OpAMD64MOVBstore)
16918 v.AuxInt = int32ToAuxInt(off)
16919 v.Aux = symToAux(sym)
16920 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16921 v0.AuxInt = int32ToAuxInt(0)
16922 v.AddArg3(ptr, v0, mem)
16923 return true
16924 }
16925
16926
16927 for {
16928 off := auxIntToInt32(v.AuxInt)
16929 sym := auxToSym(v.Aux)
16930 ptr := v_0
16931 if v_1.Op != OpAMD64FlagGT_UGT {
16932 break
16933 }
16934 mem := v_2
16935 v.reset(OpAMD64MOVBstore)
16936 v.AuxInt = int32ToAuxInt(off)
16937 v.Aux = symToAux(sym)
16938 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16939 v0.AuxInt = int32ToAuxInt(1)
16940 v.AddArg3(ptr, v0, mem)
16941 return true
16942 }
16943 return false
16944 }
16945 func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
16946 v_0 := v.Args[0]
16947
16948
16949 for {
16950 if v_0.Op != OpAMD64TESTQ {
16951 break
16952 }
16953 x := v_0.Args[1]
16954 if x != v_0.Args[0] {
16955 break
16956 }
16957 v.reset(OpConstBool)
16958 v.AuxInt = boolToAuxInt(false)
16959 return true
16960 }
16961
16962
16963 for {
16964 if v_0.Op != OpAMD64TESTL {
16965 break
16966 }
16967 x := v_0.Args[1]
16968 if x != v_0.Args[0] {
16969 break
16970 }
16971 v.reset(OpConstBool)
16972 v.AuxInt = boolToAuxInt(false)
16973 return true
16974 }
16975
16976
16977 for {
16978 if v_0.Op != OpAMD64TESTW {
16979 break
16980 }
16981 x := v_0.Args[1]
16982 if x != v_0.Args[0] {
16983 break
16984 }
16985 v.reset(OpConstBool)
16986 v.AuxInt = boolToAuxInt(false)
16987 return true
16988 }
16989
16990
16991 for {
16992 if v_0.Op != OpAMD64TESTB {
16993 break
16994 }
16995 x := v_0.Args[1]
16996 if x != v_0.Args[0] {
16997 break
16998 }
16999 v.reset(OpConstBool)
17000 v.AuxInt = boolToAuxInt(false)
17001 return true
17002 }
17003
17004
17005 for {
17006 if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
17007 break
17008 }
17009 x := v_0.Args[0]
17010 v.reset(OpAMD64ANDLconst)
17011 v.AuxInt = int32ToAuxInt(1)
17012 v.AddArg(x)
17013 return true
17014 }
17015
17016
17017 for {
17018 if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
17019 break
17020 }
17021 x := v_0.Args[0]
17022 v.reset(OpAMD64ANDQconst)
17023 v.AuxInt = int32ToAuxInt(1)
17024 v.AddArg(x)
17025 return true
17026 }
17027
17028
17029 for {
17030 if v_0.Op != OpAMD64InvertFlags {
17031 break
17032 }
17033 x := v_0.Args[0]
17034 v.reset(OpAMD64SETA)
17035 v.AddArg(x)
17036 return true
17037 }
17038
17039
17040 for {
17041 if v_0.Op != OpAMD64FlagEQ {
17042 break
17043 }
17044 v.reset(OpAMD64MOVLconst)
17045 v.AuxInt = int32ToAuxInt(0)
17046 return true
17047 }
17048
17049
17050 for {
17051 if v_0.Op != OpAMD64FlagLT_ULT {
17052 break
17053 }
17054 v.reset(OpAMD64MOVLconst)
17055 v.AuxInt = int32ToAuxInt(1)
17056 return true
17057 }
17058
17059
17060 for {
17061 if v_0.Op != OpAMD64FlagLT_UGT {
17062 break
17063 }
17064 v.reset(OpAMD64MOVLconst)
17065 v.AuxInt = int32ToAuxInt(0)
17066 return true
17067 }
17068
17069
17070 for {
17071 if v_0.Op != OpAMD64FlagGT_ULT {
17072 break
17073 }
17074 v.reset(OpAMD64MOVLconst)
17075 v.AuxInt = int32ToAuxInt(1)
17076 return true
17077 }
17078
17079
17080 for {
17081 if v_0.Op != OpAMD64FlagGT_UGT {
17082 break
17083 }
17084 v.reset(OpAMD64MOVLconst)
17085 v.AuxInt = int32ToAuxInt(0)
17086 return true
17087 }
17088 return false
17089 }
17090 func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
17091 v_0 := v.Args[0]
17092
17093
17094 for {
17095 if v_0.Op != OpAMD64InvertFlags {
17096 break
17097 }
17098 x := v_0.Args[0]
17099 v.reset(OpAMD64SETAE)
17100 v.AddArg(x)
17101 return true
17102 }
17103
17104
17105 for {
17106 if v_0.Op != OpAMD64FlagEQ {
17107 break
17108 }
17109 v.reset(OpAMD64MOVLconst)
17110 v.AuxInt = int32ToAuxInt(1)
17111 return true
17112 }
17113
17114
17115 for {
17116 if v_0.Op != OpAMD64FlagLT_ULT {
17117 break
17118 }
17119 v.reset(OpAMD64MOVLconst)
17120 v.AuxInt = int32ToAuxInt(1)
17121 return true
17122 }
17123
17124
17125 for {
17126 if v_0.Op != OpAMD64FlagLT_UGT {
17127 break
17128 }
17129 v.reset(OpAMD64MOVLconst)
17130 v.AuxInt = int32ToAuxInt(0)
17131 return true
17132 }
17133
17134
17135 for {
17136 if v_0.Op != OpAMD64FlagGT_ULT {
17137 break
17138 }
17139 v.reset(OpAMD64MOVLconst)
17140 v.AuxInt = int32ToAuxInt(1)
17141 return true
17142 }
17143
17144
17145 for {
17146 if v_0.Op != OpAMD64FlagGT_UGT {
17147 break
17148 }
17149 v.reset(OpAMD64MOVLconst)
17150 v.AuxInt = int32ToAuxInt(0)
17151 return true
17152 }
17153 return false
17154 }
17155 func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
17156 v_2 := v.Args[2]
17157 v_1 := v.Args[1]
17158 v_0 := v.Args[0]
17159 b := v.Block
17160 typ := &b.Func.Config.Types
17161
17162
17163 for {
17164 off := auxIntToInt32(v.AuxInt)
17165 sym := auxToSym(v.Aux)
17166 ptr := v_0
17167 if v_1.Op != OpAMD64InvertFlags {
17168 break
17169 }
17170 x := v_1.Args[0]
17171 mem := v_2
17172 v.reset(OpAMD64SETAEstore)
17173 v.AuxInt = int32ToAuxInt(off)
17174 v.Aux = symToAux(sym)
17175 v.AddArg3(ptr, x, mem)
17176 return true
17177 }
17178
17179
17180
17181 for {
17182 off1 := auxIntToInt32(v.AuxInt)
17183 sym := auxToSym(v.Aux)
17184 if v_0.Op != OpAMD64ADDQconst {
17185 break
17186 }
17187 off2 := auxIntToInt32(v_0.AuxInt)
17188 base := v_0.Args[0]
17189 val := v_1
17190 mem := v_2
17191 if !(is32Bit(int64(off1) + int64(off2))) {
17192 break
17193 }
17194 v.reset(OpAMD64SETBEstore)
17195 v.AuxInt = int32ToAuxInt(off1 + off2)
17196 v.Aux = symToAux(sym)
17197 v.AddArg3(base, val, mem)
17198 return true
17199 }
17200
17201
17202
17203 for {
17204 off1 := auxIntToInt32(v.AuxInt)
17205 sym1 := auxToSym(v.Aux)
17206 if v_0.Op != OpAMD64LEAQ {
17207 break
17208 }
17209 off2 := auxIntToInt32(v_0.AuxInt)
17210 sym2 := auxToSym(v_0.Aux)
17211 base := v_0.Args[0]
17212 val := v_1
17213 mem := v_2
17214 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17215 break
17216 }
17217 v.reset(OpAMD64SETBEstore)
17218 v.AuxInt = int32ToAuxInt(off1 + off2)
17219 v.Aux = symToAux(mergeSym(sym1, sym2))
17220 v.AddArg3(base, val, mem)
17221 return true
17222 }
17223
17224
17225 for {
17226 off := auxIntToInt32(v.AuxInt)
17227 sym := auxToSym(v.Aux)
17228 ptr := v_0
17229 if v_1.Op != OpAMD64FlagEQ {
17230 break
17231 }
17232 mem := v_2
17233 v.reset(OpAMD64MOVBstore)
17234 v.AuxInt = int32ToAuxInt(off)
17235 v.Aux = symToAux(sym)
17236 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17237 v0.AuxInt = int32ToAuxInt(1)
17238 v.AddArg3(ptr, v0, mem)
17239 return true
17240 }
17241
17242
17243 for {
17244 off := auxIntToInt32(v.AuxInt)
17245 sym := auxToSym(v.Aux)
17246 ptr := v_0
17247 if v_1.Op != OpAMD64FlagLT_ULT {
17248 break
17249 }
17250 mem := v_2
17251 v.reset(OpAMD64MOVBstore)
17252 v.AuxInt = int32ToAuxInt(off)
17253 v.Aux = symToAux(sym)
17254 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17255 v0.AuxInt = int32ToAuxInt(1)
17256 v.AddArg3(ptr, v0, mem)
17257 return true
17258 }
17259
17260
17261 for {
17262 off := auxIntToInt32(v.AuxInt)
17263 sym := auxToSym(v.Aux)
17264 ptr := v_0
17265 if v_1.Op != OpAMD64FlagLT_UGT {
17266 break
17267 }
17268 mem := v_2
17269 v.reset(OpAMD64MOVBstore)
17270 v.AuxInt = int32ToAuxInt(off)
17271 v.Aux = symToAux(sym)
17272 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17273 v0.AuxInt = int32ToAuxInt(0)
17274 v.AddArg3(ptr, v0, mem)
17275 return true
17276 }
17277
17278
17279 for {
17280 off := auxIntToInt32(v.AuxInt)
17281 sym := auxToSym(v.Aux)
17282 ptr := v_0
17283 if v_1.Op != OpAMD64FlagGT_ULT {
17284 break
17285 }
17286 mem := v_2
17287 v.reset(OpAMD64MOVBstore)
17288 v.AuxInt = int32ToAuxInt(off)
17289 v.Aux = symToAux(sym)
17290 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17291 v0.AuxInt = int32ToAuxInt(1)
17292 v.AddArg3(ptr, v0, mem)
17293 return true
17294 }
17295
17296
17297 for {
17298 off := auxIntToInt32(v.AuxInt)
17299 sym := auxToSym(v.Aux)
17300 ptr := v_0
17301 if v_1.Op != OpAMD64FlagGT_UGT {
17302 break
17303 }
17304 mem := v_2
17305 v.reset(OpAMD64MOVBstore)
17306 v.AuxInt = int32ToAuxInt(off)
17307 v.Aux = symToAux(sym)
17308 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17309 v0.AuxInt = int32ToAuxInt(0)
17310 v.AddArg3(ptr, v0, mem)
17311 return true
17312 }
17313 return false
17314 }
17315 func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
17316 v_2 := v.Args[2]
17317 v_1 := v.Args[1]
17318 v_0 := v.Args[0]
17319 b := v.Block
17320 typ := &b.Func.Config.Types
17321
17322
17323 for {
17324 off := auxIntToInt32(v.AuxInt)
17325 sym := auxToSym(v.Aux)
17326 ptr := v_0
17327 if v_1.Op != OpAMD64InvertFlags {
17328 break
17329 }
17330 x := v_1.Args[0]
17331 mem := v_2
17332 v.reset(OpAMD64SETAstore)
17333 v.AuxInt = int32ToAuxInt(off)
17334 v.Aux = symToAux(sym)
17335 v.AddArg3(ptr, x, mem)
17336 return true
17337 }
17338
17339
17340
17341 for {
17342 off1 := auxIntToInt32(v.AuxInt)
17343 sym := auxToSym(v.Aux)
17344 if v_0.Op != OpAMD64ADDQconst {
17345 break
17346 }
17347 off2 := auxIntToInt32(v_0.AuxInt)
17348 base := v_0.Args[0]
17349 val := v_1
17350 mem := v_2
17351 if !(is32Bit(int64(off1) + int64(off2))) {
17352 break
17353 }
17354 v.reset(OpAMD64SETBstore)
17355 v.AuxInt = int32ToAuxInt(off1 + off2)
17356 v.Aux = symToAux(sym)
17357 v.AddArg3(base, val, mem)
17358 return true
17359 }
17360
17361
17362
17363 for {
17364 off1 := auxIntToInt32(v.AuxInt)
17365 sym1 := auxToSym(v.Aux)
17366 if v_0.Op != OpAMD64LEAQ {
17367 break
17368 }
17369 off2 := auxIntToInt32(v_0.AuxInt)
17370 sym2 := auxToSym(v_0.Aux)
17371 base := v_0.Args[0]
17372 val := v_1
17373 mem := v_2
17374 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17375 break
17376 }
17377 v.reset(OpAMD64SETBstore)
17378 v.AuxInt = int32ToAuxInt(off1 + off2)
17379 v.Aux = symToAux(mergeSym(sym1, sym2))
17380 v.AddArg3(base, val, mem)
17381 return true
17382 }
17383
17384
17385 for {
17386 off := auxIntToInt32(v.AuxInt)
17387 sym := auxToSym(v.Aux)
17388 ptr := v_0
17389 if v_1.Op != OpAMD64FlagEQ {
17390 break
17391 }
17392 mem := v_2
17393 v.reset(OpAMD64MOVBstore)
17394 v.AuxInt = int32ToAuxInt(off)
17395 v.Aux = symToAux(sym)
17396 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17397 v0.AuxInt = int32ToAuxInt(0)
17398 v.AddArg3(ptr, v0, mem)
17399 return true
17400 }
17401
17402
17403 for {
17404 off := auxIntToInt32(v.AuxInt)
17405 sym := auxToSym(v.Aux)
17406 ptr := v_0
17407 if v_1.Op != OpAMD64FlagLT_ULT {
17408 break
17409 }
17410 mem := v_2
17411 v.reset(OpAMD64MOVBstore)
17412 v.AuxInt = int32ToAuxInt(off)
17413 v.Aux = symToAux(sym)
17414 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17415 v0.AuxInt = int32ToAuxInt(1)
17416 v.AddArg3(ptr, v0, mem)
17417 return true
17418 }
17419
17420
17421 for {
17422 off := auxIntToInt32(v.AuxInt)
17423 sym := auxToSym(v.Aux)
17424 ptr := v_0
17425 if v_1.Op != OpAMD64FlagLT_UGT {
17426 break
17427 }
17428 mem := v_2
17429 v.reset(OpAMD64MOVBstore)
17430 v.AuxInt = int32ToAuxInt(off)
17431 v.Aux = symToAux(sym)
17432 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17433 v0.AuxInt = int32ToAuxInt(0)
17434 v.AddArg3(ptr, v0, mem)
17435 return true
17436 }
17437
17438
17439 for {
17440 off := auxIntToInt32(v.AuxInt)
17441 sym := auxToSym(v.Aux)
17442 ptr := v_0
17443 if v_1.Op != OpAMD64FlagGT_ULT {
17444 break
17445 }
17446 mem := v_2
17447 v.reset(OpAMD64MOVBstore)
17448 v.AuxInt = int32ToAuxInt(off)
17449 v.Aux = symToAux(sym)
17450 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17451 v0.AuxInt = int32ToAuxInt(1)
17452 v.AddArg3(ptr, v0, mem)
17453 return true
17454 }
17455
17456
17457 for {
17458 off := auxIntToInt32(v.AuxInt)
17459 sym := auxToSym(v.Aux)
17460 ptr := v_0
17461 if v_1.Op != OpAMD64FlagGT_UGT {
17462 break
17463 }
17464 mem := v_2
17465 v.reset(OpAMD64MOVBstore)
17466 v.AuxInt = int32ToAuxInt(off)
17467 v.Aux = symToAux(sym)
17468 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17469 v0.AuxInt = int32ToAuxInt(0)
17470 v.AddArg3(ptr, v0, mem)
17471 return true
17472 }
17473 return false
17474 }
17475 func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
17476 v_0 := v.Args[0]
17477 b := v.Block
17478
17479
17480 for {
17481 if v_0.Op != OpAMD64TESTL {
17482 break
17483 }
17484 _ = v_0.Args[1]
17485 v_0_0 := v_0.Args[0]
17486 v_0_1 := v_0.Args[1]
17487 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17488 if v_0_0.Op != OpAMD64SHLL {
17489 continue
17490 }
17491 x := v_0_0.Args[1]
17492 v_0_0_0 := v_0_0.Args[0]
17493 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
17494 continue
17495 }
17496 y := v_0_1
17497 v.reset(OpAMD64SETAE)
17498 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
17499 v0.AddArg2(x, y)
17500 v.AddArg(v0)
17501 return true
17502 }
17503 break
17504 }
17505
17506
17507 for {
17508 if v_0.Op != OpAMD64TESTQ {
17509 break
17510 }
17511 _ = v_0.Args[1]
17512 v_0_0 := v_0.Args[0]
17513 v_0_1 := v_0.Args[1]
17514 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17515 if v_0_0.Op != OpAMD64SHLQ {
17516 continue
17517 }
17518 x := v_0_0.Args[1]
17519 v_0_0_0 := v_0_0.Args[0]
17520 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
17521 continue
17522 }
17523 y := v_0_1
17524 v.reset(OpAMD64SETAE)
17525 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
17526 v0.AddArg2(x, y)
17527 v.AddArg(v0)
17528 return true
17529 }
17530 break
17531 }
17532
17533
17534
17535 for {
17536 if v_0.Op != OpAMD64TESTLconst {
17537 break
17538 }
17539 c := auxIntToInt32(v_0.AuxInt)
17540 x := v_0.Args[0]
17541 if !(isUint32PowerOfTwo(int64(c))) {
17542 break
17543 }
17544 v.reset(OpAMD64SETAE)
17545 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17546 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17547 v0.AddArg(x)
17548 v.AddArg(v0)
17549 return true
17550 }
17551
17552
17553
17554 for {
17555 if v_0.Op != OpAMD64TESTQconst {
17556 break
17557 }
17558 c := auxIntToInt32(v_0.AuxInt)
17559 x := v_0.Args[0]
17560 if !(isUint64PowerOfTwo(int64(c))) {
17561 break
17562 }
17563 v.reset(OpAMD64SETAE)
17564 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17565 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17566 v0.AddArg(x)
17567 v.AddArg(v0)
17568 return true
17569 }
17570
17571
17572
17573 for {
17574 if v_0.Op != OpAMD64TESTQ {
17575 break
17576 }
17577 _ = v_0.Args[1]
17578 v_0_0 := v_0.Args[0]
17579 v_0_1 := v_0.Args[1]
17580 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17581 if v_0_0.Op != OpAMD64MOVQconst {
17582 continue
17583 }
17584 c := auxIntToInt64(v_0_0.AuxInt)
17585 x := v_0_1
17586 if !(isUint64PowerOfTwo(c)) {
17587 continue
17588 }
17589 v.reset(OpAMD64SETAE)
17590 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17591 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
17592 v0.AddArg(x)
17593 v.AddArg(v0)
17594 return true
17595 }
17596 break
17597 }
17598
17599
17600 for {
17601 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
17602 break
17603 }
17604 s := v_0.Args[0]
17605 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
17606 break
17607 }
17608 v.reset(OpAMD64SETNE)
17609 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
17610 v0.AuxInt = int32ToAuxInt(0)
17611 v0.AddArg(s)
17612 v.AddArg(v0)
17613 return true
17614 }
17615
17616
17617 for {
17618 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
17619 break
17620 }
17621 s := v_0.Args[0]
17622 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
17623 break
17624 }
17625 v.reset(OpAMD64SETNE)
17626 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
17627 v0.AuxInt = int32ToAuxInt(0)
17628 v0.AddArg(s)
17629 v.AddArg(v0)
17630 return true
17631 }
17632
17633
17634
17635 for {
17636 if v_0.Op != OpAMD64TESTQ {
17637 break
17638 }
17639 _ = v_0.Args[1]
17640 v_0_0 := v_0.Args[0]
17641 v_0_1 := v_0.Args[1]
17642 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17643 z1 := v_0_0
17644 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
17645 continue
17646 }
17647 z1_0 := z1.Args[0]
17648 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
17649 continue
17650 }
17651 x := z1_0.Args[0]
17652 z2 := v_0_1
17653 if !(z1 == z2) {
17654 continue
17655 }
17656 v.reset(OpAMD64SETAE)
17657 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17658 v0.AuxInt = int8ToAuxInt(63)
17659 v0.AddArg(x)
17660 v.AddArg(v0)
17661 return true
17662 }
17663 break
17664 }
17665
17666
17667
17668 for {
17669 if v_0.Op != OpAMD64TESTL {
17670 break
17671 }
17672 _ = v_0.Args[1]
17673 v_0_0 := v_0.Args[0]
17674 v_0_1 := v_0.Args[1]
17675 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17676 z1 := v_0_0
17677 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
17678 continue
17679 }
17680 z1_0 := z1.Args[0]
17681 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
17682 continue
17683 }
17684 x := z1_0.Args[0]
17685 z2 := v_0_1
17686 if !(z1 == z2) {
17687 continue
17688 }
17689 v.reset(OpAMD64SETAE)
17690 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17691 v0.AuxInt = int8ToAuxInt(31)
17692 v0.AddArg(x)
17693 v.AddArg(v0)
17694 return true
17695 }
17696 break
17697 }
17698
17699
17700
17701 for {
17702 if v_0.Op != OpAMD64TESTQ {
17703 break
17704 }
17705 _ = v_0.Args[1]
17706 v_0_0 := v_0.Args[0]
17707 v_0_1 := v_0.Args[1]
17708 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17709 z1 := v_0_0
17710 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
17711 continue
17712 }
17713 z1_0 := z1.Args[0]
17714 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
17715 continue
17716 }
17717 x := z1_0.Args[0]
17718 z2 := v_0_1
17719 if !(z1 == z2) {
17720 continue
17721 }
17722 v.reset(OpAMD64SETAE)
17723 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17724 v0.AuxInt = int8ToAuxInt(0)
17725 v0.AddArg(x)
17726 v.AddArg(v0)
17727 return true
17728 }
17729 break
17730 }
17731
17732
17733
17734 for {
17735 if v_0.Op != OpAMD64TESTL {
17736 break
17737 }
17738 _ = v_0.Args[1]
17739 v_0_0 := v_0.Args[0]
17740 v_0_1 := v_0.Args[1]
17741 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17742 z1 := v_0_0
17743 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
17744 continue
17745 }
17746 z1_0 := z1.Args[0]
17747 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
17748 continue
17749 }
17750 x := z1_0.Args[0]
17751 z2 := v_0_1
17752 if !(z1 == z2) {
17753 continue
17754 }
17755 v.reset(OpAMD64SETAE)
17756 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17757 v0.AuxInt = int8ToAuxInt(0)
17758 v0.AddArg(x)
17759 v.AddArg(v0)
17760 return true
17761 }
17762 break
17763 }
17764
17765
17766
17767 for {
17768 if v_0.Op != OpAMD64TESTQ {
17769 break
17770 }
17771 _ = v_0.Args[1]
17772 v_0_0 := v_0.Args[0]
17773 v_0_1 := v_0.Args[1]
17774 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17775 z1 := v_0_0
17776 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
17777 continue
17778 }
17779 x := z1.Args[0]
17780 z2 := v_0_1
17781 if !(z1 == z2) {
17782 continue
17783 }
17784 v.reset(OpAMD64SETAE)
17785 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17786 v0.AuxInt = int8ToAuxInt(63)
17787 v0.AddArg(x)
17788 v.AddArg(v0)
17789 return true
17790 }
17791 break
17792 }
17793
17794
17795
17796 for {
17797 if v_0.Op != OpAMD64TESTL {
17798 break
17799 }
17800 _ = v_0.Args[1]
17801 v_0_0 := v_0.Args[0]
17802 v_0_1 := v_0.Args[1]
17803 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17804 z1 := v_0_0
17805 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
17806 continue
17807 }
17808 x := z1.Args[0]
17809 z2 := v_0_1
17810 if !(z1 == z2) {
17811 continue
17812 }
17813 v.reset(OpAMD64SETAE)
17814 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17815 v0.AuxInt = int8ToAuxInt(31)
17816 v0.AddArg(x)
17817 v.AddArg(v0)
17818 return true
17819 }
17820 break
17821 }
17822
17823
17824 for {
17825 if v_0.Op != OpAMD64InvertFlags {
17826 break
17827 }
17828 x := v_0.Args[0]
17829 v.reset(OpAMD64SETEQ)
17830 v.AddArg(x)
17831 return true
17832 }
17833
17834
17835 for {
17836 if v_0.Op != OpAMD64FlagEQ {
17837 break
17838 }
17839 v.reset(OpAMD64MOVLconst)
17840 v.AuxInt = int32ToAuxInt(1)
17841 return true
17842 }
17843
17844
17845 for {
17846 if v_0.Op != OpAMD64FlagLT_ULT {
17847 break
17848 }
17849 v.reset(OpAMD64MOVLconst)
17850 v.AuxInt = int32ToAuxInt(0)
17851 return true
17852 }
17853
17854
17855 for {
17856 if v_0.Op != OpAMD64FlagLT_UGT {
17857 break
17858 }
17859 v.reset(OpAMD64MOVLconst)
17860 v.AuxInt = int32ToAuxInt(0)
17861 return true
17862 }
17863
17864
17865 for {
17866 if v_0.Op != OpAMD64FlagGT_ULT {
17867 break
17868 }
17869 v.reset(OpAMD64MOVLconst)
17870 v.AuxInt = int32ToAuxInt(0)
17871 return true
17872 }
17873
17874
17875 for {
17876 if v_0.Op != OpAMD64FlagGT_UGT {
17877 break
17878 }
17879 v.reset(OpAMD64MOVLconst)
17880 v.AuxInt = int32ToAuxInt(0)
17881 return true
17882 }
17883
17884
17885 for {
17886 if v_0.Op != OpAMD64TESTQ {
17887 break
17888 }
17889 _ = v_0.Args[1]
17890 v_0_0 := v_0.Args[0]
17891 v_0_1 := v_0.Args[1]
17892 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17893 s := v_0_0
17894 if s.Op != OpSelect0 {
17895 continue
17896 }
17897 blsr := s.Args[0]
17898 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
17899 continue
17900 }
17901 v.reset(OpAMD64SETEQ)
17902 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
17903 v0.AddArg(blsr)
17904 v.AddArg(v0)
17905 return true
17906 }
17907 break
17908 }
17909
17910
17911 for {
17912 if v_0.Op != OpAMD64TESTL {
17913 break
17914 }
17915 _ = v_0.Args[1]
17916 v_0_0 := v_0.Args[0]
17917 v_0_1 := v_0.Args[1]
17918 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17919 s := v_0_0
17920 if s.Op != OpSelect0 {
17921 continue
17922 }
17923 blsr := s.Args[0]
17924 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
17925 continue
17926 }
17927 v.reset(OpAMD64SETEQ)
17928 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
17929 v0.AddArg(blsr)
17930 v.AddArg(v0)
17931 return true
17932 }
17933 break
17934 }
17935 return false
17936 }
17937 func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
17938 v_2 := v.Args[2]
17939 v_1 := v.Args[1]
17940 v_0 := v.Args[0]
17941 b := v.Block
17942 typ := &b.Func.Config.Types
17943
17944
17945 for {
17946 off := auxIntToInt32(v.AuxInt)
17947 sym := auxToSym(v.Aux)
17948 ptr := v_0
17949 if v_1.Op != OpAMD64TESTL {
17950 break
17951 }
17952 _ = v_1.Args[1]
17953 v_1_0 := v_1.Args[0]
17954 v_1_1 := v_1.Args[1]
17955 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17956 if v_1_0.Op != OpAMD64SHLL {
17957 continue
17958 }
17959 x := v_1_0.Args[1]
17960 v_1_0_0 := v_1_0.Args[0]
17961 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
17962 continue
17963 }
17964 y := v_1_1
17965 mem := v_2
17966 v.reset(OpAMD64SETAEstore)
17967 v.AuxInt = int32ToAuxInt(off)
17968 v.Aux = symToAux(sym)
17969 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
17970 v0.AddArg2(x, y)
17971 v.AddArg3(ptr, v0, mem)
17972 return true
17973 }
17974 break
17975 }
17976
17977
17978 for {
17979 off := auxIntToInt32(v.AuxInt)
17980 sym := auxToSym(v.Aux)
17981 ptr := v_0
17982 if v_1.Op != OpAMD64TESTQ {
17983 break
17984 }
17985 _ = v_1.Args[1]
17986 v_1_0 := v_1.Args[0]
17987 v_1_1 := v_1.Args[1]
17988 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17989 if v_1_0.Op != OpAMD64SHLQ {
17990 continue
17991 }
17992 x := v_1_0.Args[1]
17993 v_1_0_0 := v_1_0.Args[0]
17994 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
17995 continue
17996 }
17997 y := v_1_1
17998 mem := v_2
17999 v.reset(OpAMD64SETAEstore)
18000 v.AuxInt = int32ToAuxInt(off)
18001 v.Aux = symToAux(sym)
18002 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
18003 v0.AddArg2(x, y)
18004 v.AddArg3(ptr, v0, mem)
18005 return true
18006 }
18007 break
18008 }
18009
18010
18011
18012 for {
18013 off := auxIntToInt32(v.AuxInt)
18014 sym := auxToSym(v.Aux)
18015 ptr := v_0
18016 if v_1.Op != OpAMD64TESTLconst {
18017 break
18018 }
18019 c := auxIntToInt32(v_1.AuxInt)
18020 x := v_1.Args[0]
18021 mem := v_2
18022 if !(isUint32PowerOfTwo(int64(c))) {
18023 break
18024 }
18025 v.reset(OpAMD64SETAEstore)
18026 v.AuxInt = int32ToAuxInt(off)
18027 v.Aux = symToAux(sym)
18028 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18029 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
18030 v0.AddArg(x)
18031 v.AddArg3(ptr, v0, mem)
18032 return true
18033 }
18034
18035
18036
18037 for {
18038 off := auxIntToInt32(v.AuxInt)
18039 sym := auxToSym(v.Aux)
18040 ptr := v_0
18041 if v_1.Op != OpAMD64TESTQconst {
18042 break
18043 }
18044 c := auxIntToInt32(v_1.AuxInt)
18045 x := v_1.Args[0]
18046 mem := v_2
18047 if !(isUint64PowerOfTwo(int64(c))) {
18048 break
18049 }
18050 v.reset(OpAMD64SETAEstore)
18051 v.AuxInt = int32ToAuxInt(off)
18052 v.Aux = symToAux(sym)
18053 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18054 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
18055 v0.AddArg(x)
18056 v.AddArg3(ptr, v0, mem)
18057 return true
18058 }
18059
18060
18061
18062 for {
18063 off := auxIntToInt32(v.AuxInt)
18064 sym := auxToSym(v.Aux)
18065 ptr := v_0
18066 if v_1.Op != OpAMD64TESTQ {
18067 break
18068 }
18069 _ = v_1.Args[1]
18070 v_1_0 := v_1.Args[0]
18071 v_1_1 := v_1.Args[1]
18072 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18073 if v_1_0.Op != OpAMD64MOVQconst {
18074 continue
18075 }
18076 c := auxIntToInt64(v_1_0.AuxInt)
18077 x := v_1_1
18078 mem := v_2
18079 if !(isUint64PowerOfTwo(c)) {
18080 continue
18081 }
18082 v.reset(OpAMD64SETAEstore)
18083 v.AuxInt = int32ToAuxInt(off)
18084 v.Aux = symToAux(sym)
18085 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18086 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
18087 v0.AddArg(x)
18088 v.AddArg3(ptr, v0, mem)
18089 return true
18090 }
18091 break
18092 }
18093
18094
18095 for {
18096 off := auxIntToInt32(v.AuxInt)
18097 sym := auxToSym(v.Aux)
18098 ptr := v_0
18099 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
18100 break
18101 }
18102 s := v_1.Args[0]
18103 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
18104 break
18105 }
18106 mem := v_2
18107 v.reset(OpAMD64SETNEstore)
18108 v.AuxInt = int32ToAuxInt(off)
18109 v.Aux = symToAux(sym)
18110 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
18111 v0.AuxInt = int32ToAuxInt(0)
18112 v0.AddArg(s)
18113 v.AddArg3(ptr, v0, mem)
18114 return true
18115 }
18116
18117
18118 for {
18119 off := auxIntToInt32(v.AuxInt)
18120 sym := auxToSym(v.Aux)
18121 ptr := v_0
18122 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
18123 break
18124 }
18125 s := v_1.Args[0]
18126 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
18127 break
18128 }
18129 mem := v_2
18130 v.reset(OpAMD64SETNEstore)
18131 v.AuxInt = int32ToAuxInt(off)
18132 v.Aux = symToAux(sym)
18133 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
18134 v0.AuxInt = int32ToAuxInt(0)
18135 v0.AddArg(s)
18136 v.AddArg3(ptr, v0, mem)
18137 return true
18138 }
18139
18140
18141
18142 for {
18143 off := auxIntToInt32(v.AuxInt)
18144 sym := auxToSym(v.Aux)
18145 ptr := v_0
18146 if v_1.Op != OpAMD64TESTQ {
18147 break
18148 }
18149 _ = v_1.Args[1]
18150 v_1_0 := v_1.Args[0]
18151 v_1_1 := v_1.Args[1]
18152 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18153 z1 := v_1_0
18154 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
18155 continue
18156 }
18157 z1_0 := z1.Args[0]
18158 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
18159 continue
18160 }
18161 x := z1_0.Args[0]
18162 z2 := v_1_1
18163 mem := v_2
18164 if !(z1 == z2) {
18165 continue
18166 }
18167 v.reset(OpAMD64SETAEstore)
18168 v.AuxInt = int32ToAuxInt(off)
18169 v.Aux = symToAux(sym)
18170 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18171 v0.AuxInt = int8ToAuxInt(63)
18172 v0.AddArg(x)
18173 v.AddArg3(ptr, v0, mem)
18174 return true
18175 }
18176 break
18177 }
18178
18179
18180
18181 for {
18182 off := auxIntToInt32(v.AuxInt)
18183 sym := auxToSym(v.Aux)
18184 ptr := v_0
18185 if v_1.Op != OpAMD64TESTL {
18186 break
18187 }
18188 _ = v_1.Args[1]
18189 v_1_0 := v_1.Args[0]
18190 v_1_1 := v_1.Args[1]
18191 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18192 z1 := v_1_0
18193 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
18194 continue
18195 }
18196 z1_0 := z1.Args[0]
18197 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
18198 continue
18199 }
18200 x := z1_0.Args[0]
18201 z2 := v_1_1
18202 mem := v_2
18203 if !(z1 == z2) {
18204 continue
18205 }
18206 v.reset(OpAMD64SETAEstore)
18207 v.AuxInt = int32ToAuxInt(off)
18208 v.Aux = symToAux(sym)
18209 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18210 v0.AuxInt = int8ToAuxInt(31)
18211 v0.AddArg(x)
18212 v.AddArg3(ptr, v0, mem)
18213 return true
18214 }
18215 break
18216 }
18217
18218
18219
18220 for {
18221 off := auxIntToInt32(v.AuxInt)
18222 sym := auxToSym(v.Aux)
18223 ptr := v_0
18224 if v_1.Op != OpAMD64TESTQ {
18225 break
18226 }
18227 _ = v_1.Args[1]
18228 v_1_0 := v_1.Args[0]
18229 v_1_1 := v_1.Args[1]
18230 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18231 z1 := v_1_0
18232 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
18233 continue
18234 }
18235 z1_0 := z1.Args[0]
18236 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
18237 continue
18238 }
18239 x := z1_0.Args[0]
18240 z2 := v_1_1
18241 mem := v_2
18242 if !(z1 == z2) {
18243 continue
18244 }
18245 v.reset(OpAMD64SETAEstore)
18246 v.AuxInt = int32ToAuxInt(off)
18247 v.Aux = symToAux(sym)
18248 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18249 v0.AuxInt = int8ToAuxInt(0)
18250 v0.AddArg(x)
18251 v.AddArg3(ptr, v0, mem)
18252 return true
18253 }
18254 break
18255 }
18256
18257
18258
18259 for {
18260 off := auxIntToInt32(v.AuxInt)
18261 sym := auxToSym(v.Aux)
18262 ptr := v_0
18263 if v_1.Op != OpAMD64TESTL {
18264 break
18265 }
18266 _ = v_1.Args[1]
18267 v_1_0 := v_1.Args[0]
18268 v_1_1 := v_1.Args[1]
18269 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18270 z1 := v_1_0
18271 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
18272 continue
18273 }
18274 z1_0 := z1.Args[0]
18275 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
18276 continue
18277 }
18278 x := z1_0.Args[0]
18279 z2 := v_1_1
18280 mem := v_2
18281 if !(z1 == z2) {
18282 continue
18283 }
18284 v.reset(OpAMD64SETAEstore)
18285 v.AuxInt = int32ToAuxInt(off)
18286 v.Aux = symToAux(sym)
18287 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18288 v0.AuxInt = int8ToAuxInt(0)
18289 v0.AddArg(x)
18290 v.AddArg3(ptr, v0, mem)
18291 return true
18292 }
18293 break
18294 }
18295
18296
18297
18298 for {
18299 off := auxIntToInt32(v.AuxInt)
18300 sym := auxToSym(v.Aux)
18301 ptr := v_0
18302 if v_1.Op != OpAMD64TESTQ {
18303 break
18304 }
18305 _ = v_1.Args[1]
18306 v_1_0 := v_1.Args[0]
18307 v_1_1 := v_1.Args[1]
18308 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18309 z1 := v_1_0
18310 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
18311 continue
18312 }
18313 x := z1.Args[0]
18314 z2 := v_1_1
18315 mem := v_2
18316 if !(z1 == z2) {
18317 continue
18318 }
18319 v.reset(OpAMD64SETAEstore)
18320 v.AuxInt = int32ToAuxInt(off)
18321 v.Aux = symToAux(sym)
18322 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18323 v0.AuxInt = int8ToAuxInt(63)
18324 v0.AddArg(x)
18325 v.AddArg3(ptr, v0, mem)
18326 return true
18327 }
18328 break
18329 }
18330
18331
18332
18333 for {
18334 off := auxIntToInt32(v.AuxInt)
18335 sym := auxToSym(v.Aux)
18336 ptr := v_0
18337 if v_1.Op != OpAMD64TESTL {
18338 break
18339 }
18340 _ = v_1.Args[1]
18341 v_1_0 := v_1.Args[0]
18342 v_1_1 := v_1.Args[1]
18343 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18344 z1 := v_1_0
18345 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
18346 continue
18347 }
18348 x := z1.Args[0]
18349 z2 := v_1_1
18350 mem := v_2
18351 if !(z1 == z2) {
18352 continue
18353 }
18354 v.reset(OpAMD64SETAEstore)
18355 v.AuxInt = int32ToAuxInt(off)
18356 v.Aux = symToAux(sym)
18357 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18358 v0.AuxInt = int8ToAuxInt(31)
18359 v0.AddArg(x)
18360 v.AddArg3(ptr, v0, mem)
18361 return true
18362 }
18363 break
18364 }
18365
18366
18367 for {
18368 off := auxIntToInt32(v.AuxInt)
18369 sym := auxToSym(v.Aux)
18370 ptr := v_0
18371 if v_1.Op != OpAMD64InvertFlags {
18372 break
18373 }
18374 x := v_1.Args[0]
18375 mem := v_2
18376 v.reset(OpAMD64SETEQstore)
18377 v.AuxInt = int32ToAuxInt(off)
18378 v.Aux = symToAux(sym)
18379 v.AddArg3(ptr, x, mem)
18380 return true
18381 }
18382
18383
18384
18385 for {
18386 off1 := auxIntToInt32(v.AuxInt)
18387 sym := auxToSym(v.Aux)
18388 if v_0.Op != OpAMD64ADDQconst {
18389 break
18390 }
18391 off2 := auxIntToInt32(v_0.AuxInt)
18392 base := v_0.Args[0]
18393 val := v_1
18394 mem := v_2
18395 if !(is32Bit(int64(off1) + int64(off2))) {
18396 break
18397 }
18398 v.reset(OpAMD64SETEQstore)
18399 v.AuxInt = int32ToAuxInt(off1 + off2)
18400 v.Aux = symToAux(sym)
18401 v.AddArg3(base, val, mem)
18402 return true
18403 }
18404
18405
18406
18407 for {
18408 off1 := auxIntToInt32(v.AuxInt)
18409 sym1 := auxToSym(v.Aux)
18410 if v_0.Op != OpAMD64LEAQ {
18411 break
18412 }
18413 off2 := auxIntToInt32(v_0.AuxInt)
18414 sym2 := auxToSym(v_0.Aux)
18415 base := v_0.Args[0]
18416 val := v_1
18417 mem := v_2
18418 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18419 break
18420 }
18421 v.reset(OpAMD64SETEQstore)
18422 v.AuxInt = int32ToAuxInt(off1 + off2)
18423 v.Aux = symToAux(mergeSym(sym1, sym2))
18424 v.AddArg3(base, val, mem)
18425 return true
18426 }
18427
18428
18429 for {
18430 off := auxIntToInt32(v.AuxInt)
18431 sym := auxToSym(v.Aux)
18432 ptr := v_0
18433 if v_1.Op != OpAMD64FlagEQ {
18434 break
18435 }
18436 mem := v_2
18437 v.reset(OpAMD64MOVBstore)
18438 v.AuxInt = int32ToAuxInt(off)
18439 v.Aux = symToAux(sym)
18440 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18441 v0.AuxInt = int32ToAuxInt(1)
18442 v.AddArg3(ptr, v0, mem)
18443 return true
18444 }
18445
18446
18447 for {
18448 off := auxIntToInt32(v.AuxInt)
18449 sym := auxToSym(v.Aux)
18450 ptr := v_0
18451 if v_1.Op != OpAMD64FlagLT_ULT {
18452 break
18453 }
18454 mem := v_2
18455 v.reset(OpAMD64MOVBstore)
18456 v.AuxInt = int32ToAuxInt(off)
18457 v.Aux = symToAux(sym)
18458 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18459 v0.AuxInt = int32ToAuxInt(0)
18460 v.AddArg3(ptr, v0, mem)
18461 return true
18462 }
18463
18464
18465 for {
18466 off := auxIntToInt32(v.AuxInt)
18467 sym := auxToSym(v.Aux)
18468 ptr := v_0
18469 if v_1.Op != OpAMD64FlagLT_UGT {
18470 break
18471 }
18472 mem := v_2
18473 v.reset(OpAMD64MOVBstore)
18474 v.AuxInt = int32ToAuxInt(off)
18475 v.Aux = symToAux(sym)
18476 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18477 v0.AuxInt = int32ToAuxInt(0)
18478 v.AddArg3(ptr, v0, mem)
18479 return true
18480 }
18481
18482
18483 for {
18484 off := auxIntToInt32(v.AuxInt)
18485 sym := auxToSym(v.Aux)
18486 ptr := v_0
18487 if v_1.Op != OpAMD64FlagGT_ULT {
18488 break
18489 }
18490 mem := v_2
18491 v.reset(OpAMD64MOVBstore)
18492 v.AuxInt = int32ToAuxInt(off)
18493 v.Aux = symToAux(sym)
18494 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18495 v0.AuxInt = int32ToAuxInt(0)
18496 v.AddArg3(ptr, v0, mem)
18497 return true
18498 }
18499
18500
18501 for {
18502 off := auxIntToInt32(v.AuxInt)
18503 sym := auxToSym(v.Aux)
18504 ptr := v_0
18505 if v_1.Op != OpAMD64FlagGT_UGT {
18506 break
18507 }
18508 mem := v_2
18509 v.reset(OpAMD64MOVBstore)
18510 v.AuxInt = int32ToAuxInt(off)
18511 v.Aux = symToAux(sym)
18512 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18513 v0.AuxInt = int32ToAuxInt(0)
18514 v.AddArg3(ptr, v0, mem)
18515 return true
18516 }
18517 return false
18518 }
18519 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
18520 v_0 := v.Args[0]
18521
18522
18523 for {
18524 if v_0.Op != OpAMD64InvertFlags {
18525 break
18526 }
18527 x := v_0.Args[0]
18528 v.reset(OpAMD64SETL)
18529 v.AddArg(x)
18530 return true
18531 }
18532
18533
18534 for {
18535 if v_0.Op != OpAMD64FlagEQ {
18536 break
18537 }
18538 v.reset(OpAMD64MOVLconst)
18539 v.AuxInt = int32ToAuxInt(0)
18540 return true
18541 }
18542
18543
18544 for {
18545 if v_0.Op != OpAMD64FlagLT_ULT {
18546 break
18547 }
18548 v.reset(OpAMD64MOVLconst)
18549 v.AuxInt = int32ToAuxInt(0)
18550 return true
18551 }
18552
18553
18554 for {
18555 if v_0.Op != OpAMD64FlagLT_UGT {
18556 break
18557 }
18558 v.reset(OpAMD64MOVLconst)
18559 v.AuxInt = int32ToAuxInt(0)
18560 return true
18561 }
18562
18563
18564 for {
18565 if v_0.Op != OpAMD64FlagGT_ULT {
18566 break
18567 }
18568 v.reset(OpAMD64MOVLconst)
18569 v.AuxInt = int32ToAuxInt(1)
18570 return true
18571 }
18572
18573
18574 for {
18575 if v_0.Op != OpAMD64FlagGT_UGT {
18576 break
18577 }
18578 v.reset(OpAMD64MOVLconst)
18579 v.AuxInt = int32ToAuxInt(1)
18580 return true
18581 }
18582 return false
18583 }
18584 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
18585 v_0 := v.Args[0]
18586
18587
18588 for {
18589 if v_0.Op != OpAMD64InvertFlags {
18590 break
18591 }
18592 x := v_0.Args[0]
18593 v.reset(OpAMD64SETLE)
18594 v.AddArg(x)
18595 return true
18596 }
18597
18598
18599 for {
18600 if v_0.Op != OpAMD64FlagEQ {
18601 break
18602 }
18603 v.reset(OpAMD64MOVLconst)
18604 v.AuxInt = int32ToAuxInt(1)
18605 return true
18606 }
18607
18608
18609 for {
18610 if v_0.Op != OpAMD64FlagLT_ULT {
18611 break
18612 }
18613 v.reset(OpAMD64MOVLconst)
18614 v.AuxInt = int32ToAuxInt(0)
18615 return true
18616 }
18617
18618
18619 for {
18620 if v_0.Op != OpAMD64FlagLT_UGT {
18621 break
18622 }
18623 v.reset(OpAMD64MOVLconst)
18624 v.AuxInt = int32ToAuxInt(0)
18625 return true
18626 }
18627
18628
18629 for {
18630 if v_0.Op != OpAMD64FlagGT_ULT {
18631 break
18632 }
18633 v.reset(OpAMD64MOVLconst)
18634 v.AuxInt = int32ToAuxInt(1)
18635 return true
18636 }
18637
18638
18639 for {
18640 if v_0.Op != OpAMD64FlagGT_UGT {
18641 break
18642 }
18643 v.reset(OpAMD64MOVLconst)
18644 v.AuxInt = int32ToAuxInt(1)
18645 return true
18646 }
18647 return false
18648 }
18649 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
18650 v_2 := v.Args[2]
18651 v_1 := v.Args[1]
18652 v_0 := v.Args[0]
18653 b := v.Block
18654 typ := &b.Func.Config.Types
18655
18656
18657 for {
18658 off := auxIntToInt32(v.AuxInt)
18659 sym := auxToSym(v.Aux)
18660 ptr := v_0
18661 if v_1.Op != OpAMD64InvertFlags {
18662 break
18663 }
18664 x := v_1.Args[0]
18665 mem := v_2
18666 v.reset(OpAMD64SETLEstore)
18667 v.AuxInt = int32ToAuxInt(off)
18668 v.Aux = symToAux(sym)
18669 v.AddArg3(ptr, x, mem)
18670 return true
18671 }
18672
18673
18674
18675 for {
18676 off1 := auxIntToInt32(v.AuxInt)
18677 sym := auxToSym(v.Aux)
18678 if v_0.Op != OpAMD64ADDQconst {
18679 break
18680 }
18681 off2 := auxIntToInt32(v_0.AuxInt)
18682 base := v_0.Args[0]
18683 val := v_1
18684 mem := v_2
18685 if !(is32Bit(int64(off1) + int64(off2))) {
18686 break
18687 }
18688 v.reset(OpAMD64SETGEstore)
18689 v.AuxInt = int32ToAuxInt(off1 + off2)
18690 v.Aux = symToAux(sym)
18691 v.AddArg3(base, val, mem)
18692 return true
18693 }
18694
18695
18696
18697 for {
18698 off1 := auxIntToInt32(v.AuxInt)
18699 sym1 := auxToSym(v.Aux)
18700 if v_0.Op != OpAMD64LEAQ {
18701 break
18702 }
18703 off2 := auxIntToInt32(v_0.AuxInt)
18704 sym2 := auxToSym(v_0.Aux)
18705 base := v_0.Args[0]
18706 val := v_1
18707 mem := v_2
18708 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18709 break
18710 }
18711 v.reset(OpAMD64SETGEstore)
18712 v.AuxInt = int32ToAuxInt(off1 + off2)
18713 v.Aux = symToAux(mergeSym(sym1, sym2))
18714 v.AddArg3(base, val, mem)
18715 return true
18716 }
18717
18718
18719 for {
18720 off := auxIntToInt32(v.AuxInt)
18721 sym := auxToSym(v.Aux)
18722 ptr := v_0
18723 if v_1.Op != OpAMD64FlagEQ {
18724 break
18725 }
18726 mem := v_2
18727 v.reset(OpAMD64MOVBstore)
18728 v.AuxInt = int32ToAuxInt(off)
18729 v.Aux = symToAux(sym)
18730 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18731 v0.AuxInt = int32ToAuxInt(1)
18732 v.AddArg3(ptr, v0, mem)
18733 return true
18734 }
18735
18736
18737 for {
18738 off := auxIntToInt32(v.AuxInt)
18739 sym := auxToSym(v.Aux)
18740 ptr := v_0
18741 if v_1.Op != OpAMD64FlagLT_ULT {
18742 break
18743 }
18744 mem := v_2
18745 v.reset(OpAMD64MOVBstore)
18746 v.AuxInt = int32ToAuxInt(off)
18747 v.Aux = symToAux(sym)
18748 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18749 v0.AuxInt = int32ToAuxInt(0)
18750 v.AddArg3(ptr, v0, mem)
18751 return true
18752 }
18753
18754
18755 for {
18756 off := auxIntToInt32(v.AuxInt)
18757 sym := auxToSym(v.Aux)
18758 ptr := v_0
18759 if v_1.Op != OpAMD64FlagLT_UGT {
18760 break
18761 }
18762 mem := v_2
18763 v.reset(OpAMD64MOVBstore)
18764 v.AuxInt = int32ToAuxInt(off)
18765 v.Aux = symToAux(sym)
18766 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18767 v0.AuxInt = int32ToAuxInt(0)
18768 v.AddArg3(ptr, v0, mem)
18769 return true
18770 }
18771
18772
18773 for {
18774 off := auxIntToInt32(v.AuxInt)
18775 sym := auxToSym(v.Aux)
18776 ptr := v_0
18777 if v_1.Op != OpAMD64FlagGT_ULT {
18778 break
18779 }
18780 mem := v_2
18781 v.reset(OpAMD64MOVBstore)
18782 v.AuxInt = int32ToAuxInt(off)
18783 v.Aux = symToAux(sym)
18784 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18785 v0.AuxInt = int32ToAuxInt(1)
18786 v.AddArg3(ptr, v0, mem)
18787 return true
18788 }
18789
18790
18791 for {
18792 off := auxIntToInt32(v.AuxInt)
18793 sym := auxToSym(v.Aux)
18794 ptr := v_0
18795 if v_1.Op != OpAMD64FlagGT_UGT {
18796 break
18797 }
18798 mem := v_2
18799 v.reset(OpAMD64MOVBstore)
18800 v.AuxInt = int32ToAuxInt(off)
18801 v.Aux = symToAux(sym)
18802 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18803 v0.AuxInt = int32ToAuxInt(1)
18804 v.AddArg3(ptr, v0, mem)
18805 return true
18806 }
18807 return false
18808 }
18809 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
18810 v_2 := v.Args[2]
18811 v_1 := v.Args[1]
18812 v_0 := v.Args[0]
18813 b := v.Block
18814 typ := &b.Func.Config.Types
18815
18816
18817 for {
18818 off := auxIntToInt32(v.AuxInt)
18819 sym := auxToSym(v.Aux)
18820 ptr := v_0
18821 if v_1.Op != OpAMD64InvertFlags {
18822 break
18823 }
18824 x := v_1.Args[0]
18825 mem := v_2
18826 v.reset(OpAMD64SETLstore)
18827 v.AuxInt = int32ToAuxInt(off)
18828 v.Aux = symToAux(sym)
18829 v.AddArg3(ptr, x, mem)
18830 return true
18831 }
18832
18833
18834
18835 for {
18836 off1 := auxIntToInt32(v.AuxInt)
18837 sym := auxToSym(v.Aux)
18838 if v_0.Op != OpAMD64ADDQconst {
18839 break
18840 }
18841 off2 := auxIntToInt32(v_0.AuxInt)
18842 base := v_0.Args[0]
18843 val := v_1
18844 mem := v_2
18845 if !(is32Bit(int64(off1) + int64(off2))) {
18846 break
18847 }
18848 v.reset(OpAMD64SETGstore)
18849 v.AuxInt = int32ToAuxInt(off1 + off2)
18850 v.Aux = symToAux(sym)
18851 v.AddArg3(base, val, mem)
18852 return true
18853 }
18854
18855
18856
18857 for {
18858 off1 := auxIntToInt32(v.AuxInt)
18859 sym1 := auxToSym(v.Aux)
18860 if v_0.Op != OpAMD64LEAQ {
18861 break
18862 }
18863 off2 := auxIntToInt32(v_0.AuxInt)
18864 sym2 := auxToSym(v_0.Aux)
18865 base := v_0.Args[0]
18866 val := v_1
18867 mem := v_2
18868 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18869 break
18870 }
18871 v.reset(OpAMD64SETGstore)
18872 v.AuxInt = int32ToAuxInt(off1 + off2)
18873 v.Aux = symToAux(mergeSym(sym1, sym2))
18874 v.AddArg3(base, val, mem)
18875 return true
18876 }
18877
18878
18879 for {
18880 off := auxIntToInt32(v.AuxInt)
18881 sym := auxToSym(v.Aux)
18882 ptr := v_0
18883 if v_1.Op != OpAMD64FlagEQ {
18884 break
18885 }
18886 mem := v_2
18887 v.reset(OpAMD64MOVBstore)
18888 v.AuxInt = int32ToAuxInt(off)
18889 v.Aux = symToAux(sym)
18890 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18891 v0.AuxInt = int32ToAuxInt(0)
18892 v.AddArg3(ptr, v0, mem)
18893 return true
18894 }
18895
18896
18897 for {
18898 off := auxIntToInt32(v.AuxInt)
18899 sym := auxToSym(v.Aux)
18900 ptr := v_0
18901 if v_1.Op != OpAMD64FlagLT_ULT {
18902 break
18903 }
18904 mem := v_2
18905 v.reset(OpAMD64MOVBstore)
18906 v.AuxInt = int32ToAuxInt(off)
18907 v.Aux = symToAux(sym)
18908 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18909 v0.AuxInt = int32ToAuxInt(0)
18910 v.AddArg3(ptr, v0, mem)
18911 return true
18912 }
18913
18914
18915 for {
18916 off := auxIntToInt32(v.AuxInt)
18917 sym := auxToSym(v.Aux)
18918 ptr := v_0
18919 if v_1.Op != OpAMD64FlagLT_UGT {
18920 break
18921 }
18922 mem := v_2
18923 v.reset(OpAMD64MOVBstore)
18924 v.AuxInt = int32ToAuxInt(off)
18925 v.Aux = symToAux(sym)
18926 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18927 v0.AuxInt = int32ToAuxInt(0)
18928 v.AddArg3(ptr, v0, mem)
18929 return true
18930 }
18931
18932
18933 for {
18934 off := auxIntToInt32(v.AuxInt)
18935 sym := auxToSym(v.Aux)
18936 ptr := v_0
18937 if v_1.Op != OpAMD64FlagGT_ULT {
18938 break
18939 }
18940 mem := v_2
18941 v.reset(OpAMD64MOVBstore)
18942 v.AuxInt = int32ToAuxInt(off)
18943 v.Aux = symToAux(sym)
18944 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18945 v0.AuxInt = int32ToAuxInt(1)
18946 v.AddArg3(ptr, v0, mem)
18947 return true
18948 }
18949
18950
18951 for {
18952 off := auxIntToInt32(v.AuxInt)
18953 sym := auxToSym(v.Aux)
18954 ptr := v_0
18955 if v_1.Op != OpAMD64FlagGT_UGT {
18956 break
18957 }
18958 mem := v_2
18959 v.reset(OpAMD64MOVBstore)
18960 v.AuxInt = int32ToAuxInt(off)
18961 v.Aux = symToAux(sym)
18962 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18963 v0.AuxInt = int32ToAuxInt(1)
18964 v.AddArg3(ptr, v0, mem)
18965 return true
18966 }
18967 return false
18968 }
18969 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
18970 v_0 := v.Args[0]
18971
18972
18973 for {
18974 if v_0.Op != OpAMD64InvertFlags {
18975 break
18976 }
18977 x := v_0.Args[0]
18978 v.reset(OpAMD64SETG)
18979 v.AddArg(x)
18980 return true
18981 }
18982
18983
18984 for {
18985 if v_0.Op != OpAMD64FlagEQ {
18986 break
18987 }
18988 v.reset(OpAMD64MOVLconst)
18989 v.AuxInt = int32ToAuxInt(0)
18990 return true
18991 }
18992
18993
18994 for {
18995 if v_0.Op != OpAMD64FlagLT_ULT {
18996 break
18997 }
18998 v.reset(OpAMD64MOVLconst)
18999 v.AuxInt = int32ToAuxInt(1)
19000 return true
19001 }
19002
19003
19004 for {
19005 if v_0.Op != OpAMD64FlagLT_UGT {
19006 break
19007 }
19008 v.reset(OpAMD64MOVLconst)
19009 v.AuxInt = int32ToAuxInt(1)
19010 return true
19011 }
19012
19013
19014 for {
19015 if v_0.Op != OpAMD64FlagGT_ULT {
19016 break
19017 }
19018 v.reset(OpAMD64MOVLconst)
19019 v.AuxInt = int32ToAuxInt(0)
19020 return true
19021 }
19022
19023
19024 for {
19025 if v_0.Op != OpAMD64FlagGT_UGT {
19026 break
19027 }
19028 v.reset(OpAMD64MOVLconst)
19029 v.AuxInt = int32ToAuxInt(0)
19030 return true
19031 }
19032 return false
19033 }
19034 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
19035 v_0 := v.Args[0]
19036
19037
19038 for {
19039 if v_0.Op != OpAMD64InvertFlags {
19040 break
19041 }
19042 x := v_0.Args[0]
19043 v.reset(OpAMD64SETGE)
19044 v.AddArg(x)
19045 return true
19046 }
19047
19048
19049 for {
19050 if v_0.Op != OpAMD64FlagEQ {
19051 break
19052 }
19053 v.reset(OpAMD64MOVLconst)
19054 v.AuxInt = int32ToAuxInt(1)
19055 return true
19056 }
19057
19058
19059 for {
19060 if v_0.Op != OpAMD64FlagLT_ULT {
19061 break
19062 }
19063 v.reset(OpAMD64MOVLconst)
19064 v.AuxInt = int32ToAuxInt(1)
19065 return true
19066 }
19067
19068
19069 for {
19070 if v_0.Op != OpAMD64FlagLT_UGT {
19071 break
19072 }
19073 v.reset(OpAMD64MOVLconst)
19074 v.AuxInt = int32ToAuxInt(1)
19075 return true
19076 }
19077
19078
19079 for {
19080 if v_0.Op != OpAMD64FlagGT_ULT {
19081 break
19082 }
19083 v.reset(OpAMD64MOVLconst)
19084 v.AuxInt = int32ToAuxInt(0)
19085 return true
19086 }
19087
19088
19089 for {
19090 if v_0.Op != OpAMD64FlagGT_UGT {
19091 break
19092 }
19093 v.reset(OpAMD64MOVLconst)
19094 v.AuxInt = int32ToAuxInt(0)
19095 return true
19096 }
19097 return false
19098 }
19099 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
19100 v_2 := v.Args[2]
19101 v_1 := v.Args[1]
19102 v_0 := v.Args[0]
19103 b := v.Block
19104 typ := &b.Func.Config.Types
19105
19106
19107 for {
19108 off := auxIntToInt32(v.AuxInt)
19109 sym := auxToSym(v.Aux)
19110 ptr := v_0
19111 if v_1.Op != OpAMD64InvertFlags {
19112 break
19113 }
19114 x := v_1.Args[0]
19115 mem := v_2
19116 v.reset(OpAMD64SETGEstore)
19117 v.AuxInt = int32ToAuxInt(off)
19118 v.Aux = symToAux(sym)
19119 v.AddArg3(ptr, x, mem)
19120 return true
19121 }
19122
19123
19124
19125 for {
19126 off1 := auxIntToInt32(v.AuxInt)
19127 sym := auxToSym(v.Aux)
19128 if v_0.Op != OpAMD64ADDQconst {
19129 break
19130 }
19131 off2 := auxIntToInt32(v_0.AuxInt)
19132 base := v_0.Args[0]
19133 val := v_1
19134 mem := v_2
19135 if !(is32Bit(int64(off1) + int64(off2))) {
19136 break
19137 }
19138 v.reset(OpAMD64SETLEstore)
19139 v.AuxInt = int32ToAuxInt(off1 + off2)
19140 v.Aux = symToAux(sym)
19141 v.AddArg3(base, val, mem)
19142 return true
19143 }
19144
19145
19146
19147 for {
19148 off1 := auxIntToInt32(v.AuxInt)
19149 sym1 := auxToSym(v.Aux)
19150 if v_0.Op != OpAMD64LEAQ {
19151 break
19152 }
19153 off2 := auxIntToInt32(v_0.AuxInt)
19154 sym2 := auxToSym(v_0.Aux)
19155 base := v_0.Args[0]
19156 val := v_1
19157 mem := v_2
19158 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19159 break
19160 }
19161 v.reset(OpAMD64SETLEstore)
19162 v.AuxInt = int32ToAuxInt(off1 + off2)
19163 v.Aux = symToAux(mergeSym(sym1, sym2))
19164 v.AddArg3(base, val, mem)
19165 return true
19166 }
19167
19168
19169 for {
19170 off := auxIntToInt32(v.AuxInt)
19171 sym := auxToSym(v.Aux)
19172 ptr := v_0
19173 if v_1.Op != OpAMD64FlagEQ {
19174 break
19175 }
19176 mem := v_2
19177 v.reset(OpAMD64MOVBstore)
19178 v.AuxInt = int32ToAuxInt(off)
19179 v.Aux = symToAux(sym)
19180 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19181 v0.AuxInt = int32ToAuxInt(1)
19182 v.AddArg3(ptr, v0, mem)
19183 return true
19184 }
19185
19186
19187 for {
19188 off := auxIntToInt32(v.AuxInt)
19189 sym := auxToSym(v.Aux)
19190 ptr := v_0
19191 if v_1.Op != OpAMD64FlagLT_ULT {
19192 break
19193 }
19194 mem := v_2
19195 v.reset(OpAMD64MOVBstore)
19196 v.AuxInt = int32ToAuxInt(off)
19197 v.Aux = symToAux(sym)
19198 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19199 v0.AuxInt = int32ToAuxInt(1)
19200 v.AddArg3(ptr, v0, mem)
19201 return true
19202 }
19203
19204
19205 for {
19206 off := auxIntToInt32(v.AuxInt)
19207 sym := auxToSym(v.Aux)
19208 ptr := v_0
19209 if v_1.Op != OpAMD64FlagLT_UGT {
19210 break
19211 }
19212 mem := v_2
19213 v.reset(OpAMD64MOVBstore)
19214 v.AuxInt = int32ToAuxInt(off)
19215 v.Aux = symToAux(sym)
19216 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19217 v0.AuxInt = int32ToAuxInt(1)
19218 v.AddArg3(ptr, v0, mem)
19219 return true
19220 }
19221
19222
19223 for {
19224 off := auxIntToInt32(v.AuxInt)
19225 sym := auxToSym(v.Aux)
19226 ptr := v_0
19227 if v_1.Op != OpAMD64FlagGT_ULT {
19228 break
19229 }
19230 mem := v_2
19231 v.reset(OpAMD64MOVBstore)
19232 v.AuxInt = int32ToAuxInt(off)
19233 v.Aux = symToAux(sym)
19234 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19235 v0.AuxInt = int32ToAuxInt(0)
19236 v.AddArg3(ptr, v0, mem)
19237 return true
19238 }
19239
19240
19241 for {
19242 off := auxIntToInt32(v.AuxInt)
19243 sym := auxToSym(v.Aux)
19244 ptr := v_0
19245 if v_1.Op != OpAMD64FlagGT_UGT {
19246 break
19247 }
19248 mem := v_2
19249 v.reset(OpAMD64MOVBstore)
19250 v.AuxInt = int32ToAuxInt(off)
19251 v.Aux = symToAux(sym)
19252 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19253 v0.AuxInt = int32ToAuxInt(0)
19254 v.AddArg3(ptr, v0, mem)
19255 return true
19256 }
19257 return false
19258 }
19259 func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
19260 v_2 := v.Args[2]
19261 v_1 := v.Args[1]
19262 v_0 := v.Args[0]
19263 b := v.Block
19264 typ := &b.Func.Config.Types
19265
19266
19267 for {
19268 off := auxIntToInt32(v.AuxInt)
19269 sym := auxToSym(v.Aux)
19270 ptr := v_0
19271 if v_1.Op != OpAMD64InvertFlags {
19272 break
19273 }
19274 x := v_1.Args[0]
19275 mem := v_2
19276 v.reset(OpAMD64SETGstore)
19277 v.AuxInt = int32ToAuxInt(off)
19278 v.Aux = symToAux(sym)
19279 v.AddArg3(ptr, x, mem)
19280 return true
19281 }
19282
19283
19284
19285 for {
19286 off1 := auxIntToInt32(v.AuxInt)
19287 sym := auxToSym(v.Aux)
19288 if v_0.Op != OpAMD64ADDQconst {
19289 break
19290 }
19291 off2 := auxIntToInt32(v_0.AuxInt)
19292 base := v_0.Args[0]
19293 val := v_1
19294 mem := v_2
19295 if !(is32Bit(int64(off1) + int64(off2))) {
19296 break
19297 }
19298 v.reset(OpAMD64SETLstore)
19299 v.AuxInt = int32ToAuxInt(off1 + off2)
19300 v.Aux = symToAux(sym)
19301 v.AddArg3(base, val, mem)
19302 return true
19303 }
19304
19305
19306
19307 for {
19308 off1 := auxIntToInt32(v.AuxInt)
19309 sym1 := auxToSym(v.Aux)
19310 if v_0.Op != OpAMD64LEAQ {
19311 break
19312 }
19313 off2 := auxIntToInt32(v_0.AuxInt)
19314 sym2 := auxToSym(v_0.Aux)
19315 base := v_0.Args[0]
19316 val := v_1
19317 mem := v_2
19318 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19319 break
19320 }
19321 v.reset(OpAMD64SETLstore)
19322 v.AuxInt = int32ToAuxInt(off1 + off2)
19323 v.Aux = symToAux(mergeSym(sym1, sym2))
19324 v.AddArg3(base, val, mem)
19325 return true
19326 }
19327
19328
19329 for {
19330 off := auxIntToInt32(v.AuxInt)
19331 sym := auxToSym(v.Aux)
19332 ptr := v_0
19333 if v_1.Op != OpAMD64FlagEQ {
19334 break
19335 }
19336 mem := v_2
19337 v.reset(OpAMD64MOVBstore)
19338 v.AuxInt = int32ToAuxInt(off)
19339 v.Aux = symToAux(sym)
19340 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19341 v0.AuxInt = int32ToAuxInt(0)
19342 v.AddArg3(ptr, v0, mem)
19343 return true
19344 }
19345
19346
19347 for {
19348 off := auxIntToInt32(v.AuxInt)
19349 sym := auxToSym(v.Aux)
19350 ptr := v_0
19351 if v_1.Op != OpAMD64FlagLT_ULT {
19352 break
19353 }
19354 mem := v_2
19355 v.reset(OpAMD64MOVBstore)
19356 v.AuxInt = int32ToAuxInt(off)
19357 v.Aux = symToAux(sym)
19358 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19359 v0.AuxInt = int32ToAuxInt(1)
19360 v.AddArg3(ptr, v0, mem)
19361 return true
19362 }
19363
19364
19365 for {
19366 off := auxIntToInt32(v.AuxInt)
19367 sym := auxToSym(v.Aux)
19368 ptr := v_0
19369 if v_1.Op != OpAMD64FlagLT_UGT {
19370 break
19371 }
19372 mem := v_2
19373 v.reset(OpAMD64MOVBstore)
19374 v.AuxInt = int32ToAuxInt(off)
19375 v.Aux = symToAux(sym)
19376 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19377 v0.AuxInt = int32ToAuxInt(1)
19378 v.AddArg3(ptr, v0, mem)
19379 return true
19380 }
19381
19382
19383 for {
19384 off := auxIntToInt32(v.AuxInt)
19385 sym := auxToSym(v.Aux)
19386 ptr := v_0
19387 if v_1.Op != OpAMD64FlagGT_ULT {
19388 break
19389 }
19390 mem := v_2
19391 v.reset(OpAMD64MOVBstore)
19392 v.AuxInt = int32ToAuxInt(off)
19393 v.Aux = symToAux(sym)
19394 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19395 v0.AuxInt = int32ToAuxInt(0)
19396 v.AddArg3(ptr, v0, mem)
19397 return true
19398 }
19399
19400
19401 for {
19402 off := auxIntToInt32(v.AuxInt)
19403 sym := auxToSym(v.Aux)
19404 ptr := v_0
19405 if v_1.Op != OpAMD64FlagGT_UGT {
19406 break
19407 }
19408 mem := v_2
19409 v.reset(OpAMD64MOVBstore)
19410 v.AuxInt = int32ToAuxInt(off)
19411 v.Aux = symToAux(sym)
19412 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19413 v0.AuxInt = int32ToAuxInt(0)
19414 v.AddArg3(ptr, v0, mem)
19415 return true
19416 }
19417 return false
19418 }
19419 func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
19420 v_0 := v.Args[0]
19421 b := v.Block
19422
19423
19424 for {
19425 if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
19426 break
19427 }
19428 x := v_0.Args[0]
19429 v.reset(OpAMD64ANDLconst)
19430 v.AuxInt = int32ToAuxInt(1)
19431 v.AddArg(x)
19432 return true
19433 }
19434
19435
19436 for {
19437 if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
19438 break
19439 }
19440 x := v_0.Args[0]
19441 v.reset(OpAMD64ANDLconst)
19442 v.AuxInt = int32ToAuxInt(1)
19443 v.AddArg(x)
19444 return true
19445 }
19446
19447
19448 for {
19449 if v_0.Op != OpAMD64TESTL {
19450 break
19451 }
19452 _ = v_0.Args[1]
19453 v_0_0 := v_0.Args[0]
19454 v_0_1 := v_0.Args[1]
19455 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19456 if v_0_0.Op != OpAMD64SHLL {
19457 continue
19458 }
19459 x := v_0_0.Args[1]
19460 v_0_0_0 := v_0_0.Args[0]
19461 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
19462 continue
19463 }
19464 y := v_0_1
19465 v.reset(OpAMD64SETB)
19466 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
19467 v0.AddArg2(x, y)
19468 v.AddArg(v0)
19469 return true
19470 }
19471 break
19472 }
19473
19474
19475 for {
19476 if v_0.Op != OpAMD64TESTQ {
19477 break
19478 }
19479 _ = v_0.Args[1]
19480 v_0_0 := v_0.Args[0]
19481 v_0_1 := v_0.Args[1]
19482 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19483 if v_0_0.Op != OpAMD64SHLQ {
19484 continue
19485 }
19486 x := v_0_0.Args[1]
19487 v_0_0_0 := v_0_0.Args[0]
19488 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
19489 continue
19490 }
19491 y := v_0_1
19492 v.reset(OpAMD64SETB)
19493 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
19494 v0.AddArg2(x, y)
19495 v.AddArg(v0)
19496 return true
19497 }
19498 break
19499 }
19500
19501
19502
19503 for {
19504 if v_0.Op != OpAMD64TESTLconst {
19505 break
19506 }
19507 c := auxIntToInt32(v_0.AuxInt)
19508 x := v_0.Args[0]
19509 if !(isUint32PowerOfTwo(int64(c))) {
19510 break
19511 }
19512 v.reset(OpAMD64SETB)
19513 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19514 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19515 v0.AddArg(x)
19516 v.AddArg(v0)
19517 return true
19518 }
19519
19520
19521
19522 for {
19523 if v_0.Op != OpAMD64TESTQconst {
19524 break
19525 }
19526 c := auxIntToInt32(v_0.AuxInt)
19527 x := v_0.Args[0]
19528 if !(isUint64PowerOfTwo(int64(c))) {
19529 break
19530 }
19531 v.reset(OpAMD64SETB)
19532 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19533 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19534 v0.AddArg(x)
19535 v.AddArg(v0)
19536 return true
19537 }
19538
19539
19540
19541 for {
19542 if v_0.Op != OpAMD64TESTQ {
19543 break
19544 }
19545 _ = v_0.Args[1]
19546 v_0_0 := v_0.Args[0]
19547 v_0_1 := v_0.Args[1]
19548 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19549 if v_0_0.Op != OpAMD64MOVQconst {
19550 continue
19551 }
19552 c := auxIntToInt64(v_0_0.AuxInt)
19553 x := v_0_1
19554 if !(isUint64PowerOfTwo(c)) {
19555 continue
19556 }
19557 v.reset(OpAMD64SETB)
19558 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19559 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
19560 v0.AddArg(x)
19561 v.AddArg(v0)
19562 return true
19563 }
19564 break
19565 }
19566
19567
19568 for {
19569 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
19570 break
19571 }
19572 s := v_0.Args[0]
19573 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
19574 break
19575 }
19576 v.reset(OpAMD64SETEQ)
19577 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
19578 v0.AuxInt = int32ToAuxInt(0)
19579 v0.AddArg(s)
19580 v.AddArg(v0)
19581 return true
19582 }
19583
19584
19585 for {
19586 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
19587 break
19588 }
19589 s := v_0.Args[0]
19590 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
19591 break
19592 }
19593 v.reset(OpAMD64SETEQ)
19594 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
19595 v0.AuxInt = int32ToAuxInt(0)
19596 v0.AddArg(s)
19597 v.AddArg(v0)
19598 return true
19599 }
19600
19601
19602
19603 for {
19604 if v_0.Op != OpAMD64TESTQ {
19605 break
19606 }
19607 _ = v_0.Args[1]
19608 v_0_0 := v_0.Args[0]
19609 v_0_1 := v_0.Args[1]
19610 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19611 z1 := v_0_0
19612 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
19613 continue
19614 }
19615 z1_0 := z1.Args[0]
19616 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
19617 continue
19618 }
19619 x := z1_0.Args[0]
19620 z2 := v_0_1
19621 if !(z1 == z2) {
19622 continue
19623 }
19624 v.reset(OpAMD64SETB)
19625 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19626 v0.AuxInt = int8ToAuxInt(63)
19627 v0.AddArg(x)
19628 v.AddArg(v0)
19629 return true
19630 }
19631 break
19632 }
19633
19634
19635
19636 for {
19637 if v_0.Op != OpAMD64TESTL {
19638 break
19639 }
19640 _ = v_0.Args[1]
19641 v_0_0 := v_0.Args[0]
19642 v_0_1 := v_0.Args[1]
19643 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19644 z1 := v_0_0
19645 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
19646 continue
19647 }
19648 z1_0 := z1.Args[0]
19649 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
19650 continue
19651 }
19652 x := z1_0.Args[0]
19653 z2 := v_0_1
19654 if !(z1 == z2) {
19655 continue
19656 }
19657 v.reset(OpAMD64SETB)
19658 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19659 v0.AuxInt = int8ToAuxInt(31)
19660 v0.AddArg(x)
19661 v.AddArg(v0)
19662 return true
19663 }
19664 break
19665 }
19666
19667
19668
19669 for {
19670 if v_0.Op != OpAMD64TESTQ {
19671 break
19672 }
19673 _ = v_0.Args[1]
19674 v_0_0 := v_0.Args[0]
19675 v_0_1 := v_0.Args[1]
19676 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19677 z1 := v_0_0
19678 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
19679 continue
19680 }
19681 z1_0 := z1.Args[0]
19682 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
19683 continue
19684 }
19685 x := z1_0.Args[0]
19686 z2 := v_0_1
19687 if !(z1 == z2) {
19688 continue
19689 }
19690 v.reset(OpAMD64SETB)
19691 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19692 v0.AuxInt = int8ToAuxInt(0)
19693 v0.AddArg(x)
19694 v.AddArg(v0)
19695 return true
19696 }
19697 break
19698 }
19699
19700
19701
19702 for {
19703 if v_0.Op != OpAMD64TESTL {
19704 break
19705 }
19706 _ = v_0.Args[1]
19707 v_0_0 := v_0.Args[0]
19708 v_0_1 := v_0.Args[1]
19709 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19710 z1 := v_0_0
19711 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
19712 continue
19713 }
19714 z1_0 := z1.Args[0]
19715 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
19716 continue
19717 }
19718 x := z1_0.Args[0]
19719 z2 := v_0_1
19720 if !(z1 == z2) {
19721 continue
19722 }
19723 v.reset(OpAMD64SETB)
19724 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19725 v0.AuxInt = int8ToAuxInt(0)
19726 v0.AddArg(x)
19727 v.AddArg(v0)
19728 return true
19729 }
19730 break
19731 }
19732
19733
19734
19735 for {
19736 if v_0.Op != OpAMD64TESTQ {
19737 break
19738 }
19739 _ = v_0.Args[1]
19740 v_0_0 := v_0.Args[0]
19741 v_0_1 := v_0.Args[1]
19742 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19743 z1 := v_0_0
19744 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
19745 continue
19746 }
19747 x := z1.Args[0]
19748 z2 := v_0_1
19749 if !(z1 == z2) {
19750 continue
19751 }
19752 v.reset(OpAMD64SETB)
19753 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19754 v0.AuxInt = int8ToAuxInt(63)
19755 v0.AddArg(x)
19756 v.AddArg(v0)
19757 return true
19758 }
19759 break
19760 }
19761
19762
19763
19764 for {
19765 if v_0.Op != OpAMD64TESTL {
19766 break
19767 }
19768 _ = v_0.Args[1]
19769 v_0_0 := v_0.Args[0]
19770 v_0_1 := v_0.Args[1]
19771 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19772 z1 := v_0_0
19773 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
19774 continue
19775 }
19776 x := z1.Args[0]
19777 z2 := v_0_1
19778 if !(z1 == z2) {
19779 continue
19780 }
19781 v.reset(OpAMD64SETB)
19782 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19783 v0.AuxInt = int8ToAuxInt(31)
19784 v0.AddArg(x)
19785 v.AddArg(v0)
19786 return true
19787 }
19788 break
19789 }
19790
19791
19792 for {
19793 if v_0.Op != OpAMD64InvertFlags {
19794 break
19795 }
19796 x := v_0.Args[0]
19797 v.reset(OpAMD64SETNE)
19798 v.AddArg(x)
19799 return true
19800 }
19801
19802
19803 for {
19804 if v_0.Op != OpAMD64FlagEQ {
19805 break
19806 }
19807 v.reset(OpAMD64MOVLconst)
19808 v.AuxInt = int32ToAuxInt(0)
19809 return true
19810 }
19811
19812
19813 for {
19814 if v_0.Op != OpAMD64FlagLT_ULT {
19815 break
19816 }
19817 v.reset(OpAMD64MOVLconst)
19818 v.AuxInt = int32ToAuxInt(1)
19819 return true
19820 }
19821
19822
19823 for {
19824 if v_0.Op != OpAMD64FlagLT_UGT {
19825 break
19826 }
19827 v.reset(OpAMD64MOVLconst)
19828 v.AuxInt = int32ToAuxInt(1)
19829 return true
19830 }
19831
19832
19833 for {
19834 if v_0.Op != OpAMD64FlagGT_ULT {
19835 break
19836 }
19837 v.reset(OpAMD64MOVLconst)
19838 v.AuxInt = int32ToAuxInt(1)
19839 return true
19840 }
19841
19842
19843 for {
19844 if v_0.Op != OpAMD64FlagGT_UGT {
19845 break
19846 }
19847 v.reset(OpAMD64MOVLconst)
19848 v.AuxInt = int32ToAuxInt(1)
19849 return true
19850 }
19851
19852
19853 for {
19854 if v_0.Op != OpAMD64TESTQ {
19855 break
19856 }
19857 _ = v_0.Args[1]
19858 v_0_0 := v_0.Args[0]
19859 v_0_1 := v_0.Args[1]
19860 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19861 s := v_0_0
19862 if s.Op != OpSelect0 {
19863 continue
19864 }
19865 blsr := s.Args[0]
19866 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
19867 continue
19868 }
19869 v.reset(OpAMD64SETNE)
19870 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
19871 v0.AddArg(blsr)
19872 v.AddArg(v0)
19873 return true
19874 }
19875 break
19876 }
19877
19878
19879 for {
19880 if v_0.Op != OpAMD64TESTL {
19881 break
19882 }
19883 _ = v_0.Args[1]
19884 v_0_0 := v_0.Args[0]
19885 v_0_1 := v_0.Args[1]
19886 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19887 s := v_0_0
19888 if s.Op != OpSelect0 {
19889 continue
19890 }
19891 blsr := s.Args[0]
19892 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
19893 continue
19894 }
19895 v.reset(OpAMD64SETNE)
19896 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
19897 v0.AddArg(blsr)
19898 v.AddArg(v0)
19899 return true
19900 }
19901 break
19902 }
19903 return false
19904 }
19905 func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
19906 v_2 := v.Args[2]
19907 v_1 := v.Args[1]
19908 v_0 := v.Args[0]
19909 b := v.Block
19910 typ := &b.Func.Config.Types
19911
19912
19913 for {
19914 off := auxIntToInt32(v.AuxInt)
19915 sym := auxToSym(v.Aux)
19916 ptr := v_0
19917 if v_1.Op != OpAMD64TESTL {
19918 break
19919 }
19920 _ = v_1.Args[1]
19921 v_1_0 := v_1.Args[0]
19922 v_1_1 := v_1.Args[1]
19923 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19924 if v_1_0.Op != OpAMD64SHLL {
19925 continue
19926 }
19927 x := v_1_0.Args[1]
19928 v_1_0_0 := v_1_0.Args[0]
19929 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
19930 continue
19931 }
19932 y := v_1_1
19933 mem := v_2
19934 v.reset(OpAMD64SETBstore)
19935 v.AuxInt = int32ToAuxInt(off)
19936 v.Aux = symToAux(sym)
19937 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
19938 v0.AddArg2(x, y)
19939 v.AddArg3(ptr, v0, mem)
19940 return true
19941 }
19942 break
19943 }
19944
19945
19946 for {
19947 off := auxIntToInt32(v.AuxInt)
19948 sym := auxToSym(v.Aux)
19949 ptr := v_0
19950 if v_1.Op != OpAMD64TESTQ {
19951 break
19952 }
19953 _ = v_1.Args[1]
19954 v_1_0 := v_1.Args[0]
19955 v_1_1 := v_1.Args[1]
19956 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19957 if v_1_0.Op != OpAMD64SHLQ {
19958 continue
19959 }
19960 x := v_1_0.Args[1]
19961 v_1_0_0 := v_1_0.Args[0]
19962 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
19963 continue
19964 }
19965 y := v_1_1
19966 mem := v_2
19967 v.reset(OpAMD64SETBstore)
19968 v.AuxInt = int32ToAuxInt(off)
19969 v.Aux = symToAux(sym)
19970 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
19971 v0.AddArg2(x, y)
19972 v.AddArg3(ptr, v0, mem)
19973 return true
19974 }
19975 break
19976 }
19977
19978
19979
19980 for {
19981 off := auxIntToInt32(v.AuxInt)
19982 sym := auxToSym(v.Aux)
19983 ptr := v_0
19984 if v_1.Op != OpAMD64TESTLconst {
19985 break
19986 }
19987 c := auxIntToInt32(v_1.AuxInt)
19988 x := v_1.Args[0]
19989 mem := v_2
19990 if !(isUint32PowerOfTwo(int64(c))) {
19991 break
19992 }
19993 v.reset(OpAMD64SETBstore)
19994 v.AuxInt = int32ToAuxInt(off)
19995 v.Aux = symToAux(sym)
19996 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19997 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19998 v0.AddArg(x)
19999 v.AddArg3(ptr, v0, mem)
20000 return true
20001 }
20002
20003
20004
20005 for {
20006 off := auxIntToInt32(v.AuxInt)
20007 sym := auxToSym(v.Aux)
20008 ptr := v_0
20009 if v_1.Op != OpAMD64TESTQconst {
20010 break
20011 }
20012 c := auxIntToInt32(v_1.AuxInt)
20013 x := v_1.Args[0]
20014 mem := v_2
20015 if !(isUint64PowerOfTwo(int64(c))) {
20016 break
20017 }
20018 v.reset(OpAMD64SETBstore)
20019 v.AuxInt = int32ToAuxInt(off)
20020 v.Aux = symToAux(sym)
20021 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20022 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
20023 v0.AddArg(x)
20024 v.AddArg3(ptr, v0, mem)
20025 return true
20026 }
20027
20028
20029
20030 for {
20031 off := auxIntToInt32(v.AuxInt)
20032 sym := auxToSym(v.Aux)
20033 ptr := v_0
20034 if v_1.Op != OpAMD64TESTQ {
20035 break
20036 }
20037 _ = v_1.Args[1]
20038 v_1_0 := v_1.Args[0]
20039 v_1_1 := v_1.Args[1]
20040 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20041 if v_1_0.Op != OpAMD64MOVQconst {
20042 continue
20043 }
20044 c := auxIntToInt64(v_1_0.AuxInt)
20045 x := v_1_1
20046 mem := v_2
20047 if !(isUint64PowerOfTwo(c)) {
20048 continue
20049 }
20050 v.reset(OpAMD64SETBstore)
20051 v.AuxInt = int32ToAuxInt(off)
20052 v.Aux = symToAux(sym)
20053 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20054 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
20055 v0.AddArg(x)
20056 v.AddArg3(ptr, v0, mem)
20057 return true
20058 }
20059 break
20060 }
20061
20062
20063 for {
20064 off := auxIntToInt32(v.AuxInt)
20065 sym := auxToSym(v.Aux)
20066 ptr := v_0
20067 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
20068 break
20069 }
20070 s := v_1.Args[0]
20071 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
20072 break
20073 }
20074 mem := v_2
20075 v.reset(OpAMD64SETEQstore)
20076 v.AuxInt = int32ToAuxInt(off)
20077 v.Aux = symToAux(sym)
20078 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
20079 v0.AuxInt = int32ToAuxInt(0)
20080 v0.AddArg(s)
20081 v.AddArg3(ptr, v0, mem)
20082 return true
20083 }
20084
20085
20086 for {
20087 off := auxIntToInt32(v.AuxInt)
20088 sym := auxToSym(v.Aux)
20089 ptr := v_0
20090 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
20091 break
20092 }
20093 s := v_1.Args[0]
20094 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
20095 break
20096 }
20097 mem := v_2
20098 v.reset(OpAMD64SETEQstore)
20099 v.AuxInt = int32ToAuxInt(off)
20100 v.Aux = symToAux(sym)
20101 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
20102 v0.AuxInt = int32ToAuxInt(0)
20103 v0.AddArg(s)
20104 v.AddArg3(ptr, v0, mem)
20105 return true
20106 }
20107
20108
20109
20110 for {
20111 off := auxIntToInt32(v.AuxInt)
20112 sym := auxToSym(v.Aux)
20113 ptr := v_0
20114 if v_1.Op != OpAMD64TESTQ {
20115 break
20116 }
20117 _ = v_1.Args[1]
20118 v_1_0 := v_1.Args[0]
20119 v_1_1 := v_1.Args[1]
20120 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20121 z1 := v_1_0
20122 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
20123 continue
20124 }
20125 z1_0 := z1.Args[0]
20126 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
20127 continue
20128 }
20129 x := z1_0.Args[0]
20130 z2 := v_1_1
20131 mem := v_2
20132 if !(z1 == z2) {
20133 continue
20134 }
20135 v.reset(OpAMD64SETBstore)
20136 v.AuxInt = int32ToAuxInt(off)
20137 v.Aux = symToAux(sym)
20138 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20139 v0.AuxInt = int8ToAuxInt(63)
20140 v0.AddArg(x)
20141 v.AddArg3(ptr, v0, mem)
20142 return true
20143 }
20144 break
20145 }
20146
20147
20148
20149 for {
20150 off := auxIntToInt32(v.AuxInt)
20151 sym := auxToSym(v.Aux)
20152 ptr := v_0
20153 if v_1.Op != OpAMD64TESTL {
20154 break
20155 }
20156 _ = v_1.Args[1]
20157 v_1_0 := v_1.Args[0]
20158 v_1_1 := v_1.Args[1]
20159 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20160 z1 := v_1_0
20161 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
20162 continue
20163 }
20164 z1_0 := z1.Args[0]
20165 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
20166 continue
20167 }
20168 x := z1_0.Args[0]
20169 z2 := v_1_1
20170 mem := v_2
20171 if !(z1 == z2) {
20172 continue
20173 }
20174 v.reset(OpAMD64SETBstore)
20175 v.AuxInt = int32ToAuxInt(off)
20176 v.Aux = symToAux(sym)
20177 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20178 v0.AuxInt = int8ToAuxInt(31)
20179 v0.AddArg(x)
20180 v.AddArg3(ptr, v0, mem)
20181 return true
20182 }
20183 break
20184 }
20185
20186
20187
20188 for {
20189 off := auxIntToInt32(v.AuxInt)
20190 sym := auxToSym(v.Aux)
20191 ptr := v_0
20192 if v_1.Op != OpAMD64TESTQ {
20193 break
20194 }
20195 _ = v_1.Args[1]
20196 v_1_0 := v_1.Args[0]
20197 v_1_1 := v_1.Args[1]
20198 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20199 z1 := v_1_0
20200 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
20201 continue
20202 }
20203 z1_0 := z1.Args[0]
20204 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
20205 continue
20206 }
20207 x := z1_0.Args[0]
20208 z2 := v_1_1
20209 mem := v_2
20210 if !(z1 == z2) {
20211 continue
20212 }
20213 v.reset(OpAMD64SETBstore)
20214 v.AuxInt = int32ToAuxInt(off)
20215 v.Aux = symToAux(sym)
20216 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20217 v0.AuxInt = int8ToAuxInt(0)
20218 v0.AddArg(x)
20219 v.AddArg3(ptr, v0, mem)
20220 return true
20221 }
20222 break
20223 }
20224
20225
20226
20227 for {
20228 off := auxIntToInt32(v.AuxInt)
20229 sym := auxToSym(v.Aux)
20230 ptr := v_0
20231 if v_1.Op != OpAMD64TESTL {
20232 break
20233 }
20234 _ = v_1.Args[1]
20235 v_1_0 := v_1.Args[0]
20236 v_1_1 := v_1.Args[1]
20237 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20238 z1 := v_1_0
20239 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
20240 continue
20241 }
20242 z1_0 := z1.Args[0]
20243 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
20244 continue
20245 }
20246 x := z1_0.Args[0]
20247 z2 := v_1_1
20248 mem := v_2
20249 if !(z1 == z2) {
20250 continue
20251 }
20252 v.reset(OpAMD64SETBstore)
20253 v.AuxInt = int32ToAuxInt(off)
20254 v.Aux = symToAux(sym)
20255 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20256 v0.AuxInt = int8ToAuxInt(0)
20257 v0.AddArg(x)
20258 v.AddArg3(ptr, v0, mem)
20259 return true
20260 }
20261 break
20262 }
20263
20264
20265
20266 for {
20267 off := auxIntToInt32(v.AuxInt)
20268 sym := auxToSym(v.Aux)
20269 ptr := v_0
20270 if v_1.Op != OpAMD64TESTQ {
20271 break
20272 }
20273 _ = v_1.Args[1]
20274 v_1_0 := v_1.Args[0]
20275 v_1_1 := v_1.Args[1]
20276 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20277 z1 := v_1_0
20278 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
20279 continue
20280 }
20281 x := z1.Args[0]
20282 z2 := v_1_1
20283 mem := v_2
20284 if !(z1 == z2) {
20285 continue
20286 }
20287 v.reset(OpAMD64SETBstore)
20288 v.AuxInt = int32ToAuxInt(off)
20289 v.Aux = symToAux(sym)
20290 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20291 v0.AuxInt = int8ToAuxInt(63)
20292 v0.AddArg(x)
20293 v.AddArg3(ptr, v0, mem)
20294 return true
20295 }
20296 break
20297 }
20298
20299
20300
20301 for {
20302 off := auxIntToInt32(v.AuxInt)
20303 sym := auxToSym(v.Aux)
20304 ptr := v_0
20305 if v_1.Op != OpAMD64TESTL {
20306 break
20307 }
20308 _ = v_1.Args[1]
20309 v_1_0 := v_1.Args[0]
20310 v_1_1 := v_1.Args[1]
20311 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20312 z1 := v_1_0
20313 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
20314 continue
20315 }
20316 x := z1.Args[0]
20317 z2 := v_1_1
20318 mem := v_2
20319 if !(z1 == z2) {
20320 continue
20321 }
20322 v.reset(OpAMD64SETBstore)
20323 v.AuxInt = int32ToAuxInt(off)
20324 v.Aux = symToAux(sym)
20325 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20326 v0.AuxInt = int8ToAuxInt(31)
20327 v0.AddArg(x)
20328 v.AddArg3(ptr, v0, mem)
20329 return true
20330 }
20331 break
20332 }
20333
20334
20335 for {
20336 off := auxIntToInt32(v.AuxInt)
20337 sym := auxToSym(v.Aux)
20338 ptr := v_0
20339 if v_1.Op != OpAMD64InvertFlags {
20340 break
20341 }
20342 x := v_1.Args[0]
20343 mem := v_2
20344 v.reset(OpAMD64SETNEstore)
20345 v.AuxInt = int32ToAuxInt(off)
20346 v.Aux = symToAux(sym)
20347 v.AddArg3(ptr, x, mem)
20348 return true
20349 }
20350
20351
20352
20353 for {
20354 off1 := auxIntToInt32(v.AuxInt)
20355 sym := auxToSym(v.Aux)
20356 if v_0.Op != OpAMD64ADDQconst {
20357 break
20358 }
20359 off2 := auxIntToInt32(v_0.AuxInt)
20360 base := v_0.Args[0]
20361 val := v_1
20362 mem := v_2
20363 if !(is32Bit(int64(off1) + int64(off2))) {
20364 break
20365 }
20366 v.reset(OpAMD64SETNEstore)
20367 v.AuxInt = int32ToAuxInt(off1 + off2)
20368 v.Aux = symToAux(sym)
20369 v.AddArg3(base, val, mem)
20370 return true
20371 }
20372
20373
20374
20375 for {
20376 off1 := auxIntToInt32(v.AuxInt)
20377 sym1 := auxToSym(v.Aux)
20378 if v_0.Op != OpAMD64LEAQ {
20379 break
20380 }
20381 off2 := auxIntToInt32(v_0.AuxInt)
20382 sym2 := auxToSym(v_0.Aux)
20383 base := v_0.Args[0]
20384 val := v_1
20385 mem := v_2
20386 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
20387 break
20388 }
20389 v.reset(OpAMD64SETNEstore)
20390 v.AuxInt = int32ToAuxInt(off1 + off2)
20391 v.Aux = symToAux(mergeSym(sym1, sym2))
20392 v.AddArg3(base, val, mem)
20393 return true
20394 }
20395
20396
20397 for {
20398 off := auxIntToInt32(v.AuxInt)
20399 sym := auxToSym(v.Aux)
20400 ptr := v_0
20401 if v_1.Op != OpAMD64FlagEQ {
20402 break
20403 }
20404 mem := v_2
20405 v.reset(OpAMD64MOVBstore)
20406 v.AuxInt = int32ToAuxInt(off)
20407 v.Aux = symToAux(sym)
20408 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20409 v0.AuxInt = int32ToAuxInt(0)
20410 v.AddArg3(ptr, v0, mem)
20411 return true
20412 }
20413
20414
20415 for {
20416 off := auxIntToInt32(v.AuxInt)
20417 sym := auxToSym(v.Aux)
20418 ptr := v_0
20419 if v_1.Op != OpAMD64FlagLT_ULT {
20420 break
20421 }
20422 mem := v_2
20423 v.reset(OpAMD64MOVBstore)
20424 v.AuxInt = int32ToAuxInt(off)
20425 v.Aux = symToAux(sym)
20426 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20427 v0.AuxInt = int32ToAuxInt(1)
20428 v.AddArg3(ptr, v0, mem)
20429 return true
20430 }
20431
20432
20433 for {
20434 off := auxIntToInt32(v.AuxInt)
20435 sym := auxToSym(v.Aux)
20436 ptr := v_0
20437 if v_1.Op != OpAMD64FlagLT_UGT {
20438 break
20439 }
20440 mem := v_2
20441 v.reset(OpAMD64MOVBstore)
20442 v.AuxInt = int32ToAuxInt(off)
20443 v.Aux = symToAux(sym)
20444 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20445 v0.AuxInt = int32ToAuxInt(1)
20446 v.AddArg3(ptr, v0, mem)
20447 return true
20448 }
20449
20450
20451 for {
20452 off := auxIntToInt32(v.AuxInt)
20453 sym := auxToSym(v.Aux)
20454 ptr := v_0
20455 if v_1.Op != OpAMD64FlagGT_ULT {
20456 break
20457 }
20458 mem := v_2
20459 v.reset(OpAMD64MOVBstore)
20460 v.AuxInt = int32ToAuxInt(off)
20461 v.Aux = symToAux(sym)
20462 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20463 v0.AuxInt = int32ToAuxInt(1)
20464 v.AddArg3(ptr, v0, mem)
20465 return true
20466 }
20467
20468
20469 for {
20470 off := auxIntToInt32(v.AuxInt)
20471 sym := auxToSym(v.Aux)
20472 ptr := v_0
20473 if v_1.Op != OpAMD64FlagGT_UGT {
20474 break
20475 }
20476 mem := v_2
20477 v.reset(OpAMD64MOVBstore)
20478 v.AuxInt = int32ToAuxInt(off)
20479 v.Aux = symToAux(sym)
20480 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20481 v0.AuxInt = int32ToAuxInt(1)
20482 v.AddArg3(ptr, v0, mem)
20483 return true
20484 }
20485 return false
20486 }
20487 func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
20488 v_1 := v.Args[1]
20489 v_0 := v.Args[0]
20490 b := v.Block
20491
20492
20493 for {
20494 x := v_0
20495 if v_1.Op != OpAMD64MOVQconst {
20496 break
20497 }
20498 c := auxIntToInt64(v_1.AuxInt)
20499 v.reset(OpAMD64SHLLconst)
20500 v.AuxInt = int8ToAuxInt(int8(c & 31))
20501 v.AddArg(x)
20502 return true
20503 }
20504
20505
20506 for {
20507 x := v_0
20508 if v_1.Op != OpAMD64MOVLconst {
20509 break
20510 }
20511 c := auxIntToInt32(v_1.AuxInt)
20512 v.reset(OpAMD64SHLLconst)
20513 v.AuxInt = int8ToAuxInt(int8(c & 31))
20514 v.AddArg(x)
20515 return true
20516 }
20517
20518
20519
20520 for {
20521 x := v_0
20522 if v_1.Op != OpAMD64ADDQconst {
20523 break
20524 }
20525 c := auxIntToInt32(v_1.AuxInt)
20526 y := v_1.Args[0]
20527 if !(c&31 == 0) {
20528 break
20529 }
20530 v.reset(OpAMD64SHLL)
20531 v.AddArg2(x, y)
20532 return true
20533 }
20534
20535
20536
20537 for {
20538 x := v_0
20539 if v_1.Op != OpAMD64NEGQ {
20540 break
20541 }
20542 t := v_1.Type
20543 v_1_0 := v_1.Args[0]
20544 if v_1_0.Op != OpAMD64ADDQconst {
20545 break
20546 }
20547 c := auxIntToInt32(v_1_0.AuxInt)
20548 y := v_1_0.Args[0]
20549 if !(c&31 == 0) {
20550 break
20551 }
20552 v.reset(OpAMD64SHLL)
20553 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20554 v0.AddArg(y)
20555 v.AddArg2(x, v0)
20556 return true
20557 }
20558
20559
20560
20561 for {
20562 x := v_0
20563 if v_1.Op != OpAMD64ANDQconst {
20564 break
20565 }
20566 c := auxIntToInt32(v_1.AuxInt)
20567 y := v_1.Args[0]
20568 if !(c&31 == 31) {
20569 break
20570 }
20571 v.reset(OpAMD64SHLL)
20572 v.AddArg2(x, y)
20573 return true
20574 }
20575
20576
20577
20578 for {
20579 x := v_0
20580 if v_1.Op != OpAMD64NEGQ {
20581 break
20582 }
20583 t := v_1.Type
20584 v_1_0 := v_1.Args[0]
20585 if v_1_0.Op != OpAMD64ANDQconst {
20586 break
20587 }
20588 c := auxIntToInt32(v_1_0.AuxInt)
20589 y := v_1_0.Args[0]
20590 if !(c&31 == 31) {
20591 break
20592 }
20593 v.reset(OpAMD64SHLL)
20594 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20595 v0.AddArg(y)
20596 v.AddArg2(x, v0)
20597 return true
20598 }
20599
20600
20601
20602 for {
20603 x := v_0
20604 if v_1.Op != OpAMD64ADDLconst {
20605 break
20606 }
20607 c := auxIntToInt32(v_1.AuxInt)
20608 y := v_1.Args[0]
20609 if !(c&31 == 0) {
20610 break
20611 }
20612 v.reset(OpAMD64SHLL)
20613 v.AddArg2(x, y)
20614 return true
20615 }
20616
20617
20618
20619 for {
20620 x := v_0
20621 if v_1.Op != OpAMD64NEGL {
20622 break
20623 }
20624 t := v_1.Type
20625 v_1_0 := v_1.Args[0]
20626 if v_1_0.Op != OpAMD64ADDLconst {
20627 break
20628 }
20629 c := auxIntToInt32(v_1_0.AuxInt)
20630 y := v_1_0.Args[0]
20631 if !(c&31 == 0) {
20632 break
20633 }
20634 v.reset(OpAMD64SHLL)
20635 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20636 v0.AddArg(y)
20637 v.AddArg2(x, v0)
20638 return true
20639 }
20640
20641
20642
20643 for {
20644 x := v_0
20645 if v_1.Op != OpAMD64ANDLconst {
20646 break
20647 }
20648 c := auxIntToInt32(v_1.AuxInt)
20649 y := v_1.Args[0]
20650 if !(c&31 == 31) {
20651 break
20652 }
20653 v.reset(OpAMD64SHLL)
20654 v.AddArg2(x, y)
20655 return true
20656 }
20657
20658
20659
20660 for {
20661 x := v_0
20662 if v_1.Op != OpAMD64NEGL {
20663 break
20664 }
20665 t := v_1.Type
20666 v_1_0 := v_1.Args[0]
20667 if v_1_0.Op != OpAMD64ANDLconst {
20668 break
20669 }
20670 c := auxIntToInt32(v_1_0.AuxInt)
20671 y := v_1_0.Args[0]
20672 if !(c&31 == 31) {
20673 break
20674 }
20675 v.reset(OpAMD64SHLL)
20676 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20677 v0.AddArg(y)
20678 v.AddArg2(x, v0)
20679 return true
20680 }
20681
20682
20683
20684 for {
20685 l := v_0
20686 if l.Op != OpAMD64MOVLload {
20687 break
20688 }
20689 off := auxIntToInt32(l.AuxInt)
20690 sym := auxToSym(l.Aux)
20691 mem := l.Args[1]
20692 ptr := l.Args[0]
20693 x := v_1
20694 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
20695 break
20696 }
20697 v.reset(OpAMD64SHLXLload)
20698 v.AuxInt = int32ToAuxInt(off)
20699 v.Aux = symToAux(sym)
20700 v.AddArg3(ptr, x, mem)
20701 return true
20702 }
20703 return false
20704 }
20705 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
20706 v_0 := v.Args[0]
20707
20708
20709 for {
20710 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
20711 break
20712 }
20713 x := v_0.Args[0]
20714 v.reset(OpAMD64ANDLconst)
20715 v.AuxInt = int32ToAuxInt(-2)
20716 v.AddArg(x)
20717 return true
20718 }
20719
20720
20721 for {
20722 if auxIntToInt8(v.AuxInt) != 0 {
20723 break
20724 }
20725 x := v_0
20726 v.copyOf(x)
20727 return true
20728 }
20729
20730
20731 for {
20732 d := auxIntToInt8(v.AuxInt)
20733 if v_0.Op != OpAMD64MOVLconst {
20734 break
20735 }
20736 c := auxIntToInt32(v_0.AuxInt)
20737 v.reset(OpAMD64MOVLconst)
20738 v.AuxInt = int32ToAuxInt(c << uint64(d))
20739 return true
20740 }
20741 return false
20742 }
20743 func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
20744 v_1 := v.Args[1]
20745 v_0 := v.Args[0]
20746 b := v.Block
20747
20748
20749 for {
20750 x := v_0
20751 if v_1.Op != OpAMD64MOVQconst {
20752 break
20753 }
20754 c := auxIntToInt64(v_1.AuxInt)
20755 v.reset(OpAMD64SHLQconst)
20756 v.AuxInt = int8ToAuxInt(int8(c & 63))
20757 v.AddArg(x)
20758 return true
20759 }
20760
20761
20762 for {
20763 x := v_0
20764 if v_1.Op != OpAMD64MOVLconst {
20765 break
20766 }
20767 c := auxIntToInt32(v_1.AuxInt)
20768 v.reset(OpAMD64SHLQconst)
20769 v.AuxInt = int8ToAuxInt(int8(c & 63))
20770 v.AddArg(x)
20771 return true
20772 }
20773
20774
20775
20776 for {
20777 x := v_0
20778 if v_1.Op != OpAMD64ADDQconst {
20779 break
20780 }
20781 c := auxIntToInt32(v_1.AuxInt)
20782 y := v_1.Args[0]
20783 if !(c&63 == 0) {
20784 break
20785 }
20786 v.reset(OpAMD64SHLQ)
20787 v.AddArg2(x, y)
20788 return true
20789 }
20790
20791
20792
20793 for {
20794 x := v_0
20795 if v_1.Op != OpAMD64NEGQ {
20796 break
20797 }
20798 t := v_1.Type
20799 v_1_0 := v_1.Args[0]
20800 if v_1_0.Op != OpAMD64ADDQconst {
20801 break
20802 }
20803 c := auxIntToInt32(v_1_0.AuxInt)
20804 y := v_1_0.Args[0]
20805 if !(c&63 == 0) {
20806 break
20807 }
20808 v.reset(OpAMD64SHLQ)
20809 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20810 v0.AddArg(y)
20811 v.AddArg2(x, v0)
20812 return true
20813 }
20814
20815
20816
20817 for {
20818 x := v_0
20819 if v_1.Op != OpAMD64ANDQconst {
20820 break
20821 }
20822 c := auxIntToInt32(v_1.AuxInt)
20823 y := v_1.Args[0]
20824 if !(c&63 == 63) {
20825 break
20826 }
20827 v.reset(OpAMD64SHLQ)
20828 v.AddArg2(x, y)
20829 return true
20830 }
20831
20832
20833
20834 for {
20835 x := v_0
20836 if v_1.Op != OpAMD64NEGQ {
20837 break
20838 }
20839 t := v_1.Type
20840 v_1_0 := v_1.Args[0]
20841 if v_1_0.Op != OpAMD64ANDQconst {
20842 break
20843 }
20844 c := auxIntToInt32(v_1_0.AuxInt)
20845 y := v_1_0.Args[0]
20846 if !(c&63 == 63) {
20847 break
20848 }
20849 v.reset(OpAMD64SHLQ)
20850 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20851 v0.AddArg(y)
20852 v.AddArg2(x, v0)
20853 return true
20854 }
20855
20856
20857
20858 for {
20859 x := v_0
20860 if v_1.Op != OpAMD64ADDLconst {
20861 break
20862 }
20863 c := auxIntToInt32(v_1.AuxInt)
20864 y := v_1.Args[0]
20865 if !(c&63 == 0) {
20866 break
20867 }
20868 v.reset(OpAMD64SHLQ)
20869 v.AddArg2(x, y)
20870 return true
20871 }
20872
20873
20874
20875 for {
20876 x := v_0
20877 if v_1.Op != OpAMD64NEGL {
20878 break
20879 }
20880 t := v_1.Type
20881 v_1_0 := v_1.Args[0]
20882 if v_1_0.Op != OpAMD64ADDLconst {
20883 break
20884 }
20885 c := auxIntToInt32(v_1_0.AuxInt)
20886 y := v_1_0.Args[0]
20887 if !(c&63 == 0) {
20888 break
20889 }
20890 v.reset(OpAMD64SHLQ)
20891 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20892 v0.AddArg(y)
20893 v.AddArg2(x, v0)
20894 return true
20895 }
20896
20897
20898
20899 for {
20900 x := v_0
20901 if v_1.Op != OpAMD64ANDLconst {
20902 break
20903 }
20904 c := auxIntToInt32(v_1.AuxInt)
20905 y := v_1.Args[0]
20906 if !(c&63 == 63) {
20907 break
20908 }
20909 v.reset(OpAMD64SHLQ)
20910 v.AddArg2(x, y)
20911 return true
20912 }
20913
20914
20915
20916 for {
20917 x := v_0
20918 if v_1.Op != OpAMD64NEGL {
20919 break
20920 }
20921 t := v_1.Type
20922 v_1_0 := v_1.Args[0]
20923 if v_1_0.Op != OpAMD64ANDLconst {
20924 break
20925 }
20926 c := auxIntToInt32(v_1_0.AuxInt)
20927 y := v_1_0.Args[0]
20928 if !(c&63 == 63) {
20929 break
20930 }
20931 v.reset(OpAMD64SHLQ)
20932 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20933 v0.AddArg(y)
20934 v.AddArg2(x, v0)
20935 return true
20936 }
20937
20938
20939
20940 for {
20941 l := v_0
20942 if l.Op != OpAMD64MOVQload {
20943 break
20944 }
20945 off := auxIntToInt32(l.AuxInt)
20946 sym := auxToSym(l.Aux)
20947 mem := l.Args[1]
20948 ptr := l.Args[0]
20949 x := v_1
20950 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
20951 break
20952 }
20953 v.reset(OpAMD64SHLXQload)
20954 v.AuxInt = int32ToAuxInt(off)
20955 v.Aux = symToAux(sym)
20956 v.AddArg3(ptr, x, mem)
20957 return true
20958 }
20959 return false
20960 }
20961 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
20962 v_0 := v.Args[0]
20963
20964
20965 for {
20966 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
20967 break
20968 }
20969 x := v_0.Args[0]
20970 v.reset(OpAMD64ANDQconst)
20971 v.AuxInt = int32ToAuxInt(-2)
20972 v.AddArg(x)
20973 return true
20974 }
20975
20976
20977 for {
20978 if auxIntToInt8(v.AuxInt) != 0 {
20979 break
20980 }
20981 x := v_0
20982 v.copyOf(x)
20983 return true
20984 }
20985
20986
20987 for {
20988 d := auxIntToInt8(v.AuxInt)
20989 if v_0.Op != OpAMD64MOVQconst {
20990 break
20991 }
20992 c := auxIntToInt64(v_0.AuxInt)
20993 v.reset(OpAMD64MOVQconst)
20994 v.AuxInt = int64ToAuxInt(c << uint64(d))
20995 return true
20996 }
20997
20998
20999 for {
21000 d := auxIntToInt8(v.AuxInt)
21001 if v_0.Op != OpAMD64MOVLconst {
21002 break
21003 }
21004 c := auxIntToInt32(v_0.AuxInt)
21005 v.reset(OpAMD64MOVQconst)
21006 v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
21007 return true
21008 }
21009 return false
21010 }
21011 func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
21012 v_2 := v.Args[2]
21013 v_1 := v.Args[1]
21014 v_0 := v.Args[0]
21015 b := v.Block
21016 typ := &b.Func.Config.Types
21017
21018
21019 for {
21020 off := auxIntToInt32(v.AuxInt)
21021 sym := auxToSym(v.Aux)
21022 ptr := v_0
21023 if v_1.Op != OpAMD64MOVLconst {
21024 break
21025 }
21026 c := auxIntToInt32(v_1.AuxInt)
21027 mem := v_2
21028 v.reset(OpAMD64SHLLconst)
21029 v.AuxInt = int8ToAuxInt(int8(c & 31))
21030 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
21031 v0.AuxInt = int32ToAuxInt(off)
21032 v0.Aux = symToAux(sym)
21033 v0.AddArg2(ptr, mem)
21034 v.AddArg(v0)
21035 return true
21036 }
21037 return false
21038 }
21039 func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
21040 v_2 := v.Args[2]
21041 v_1 := v.Args[1]
21042 v_0 := v.Args[0]
21043 b := v.Block
21044 typ := &b.Func.Config.Types
21045
21046
21047 for {
21048 off := auxIntToInt32(v.AuxInt)
21049 sym := auxToSym(v.Aux)
21050 ptr := v_0
21051 if v_1.Op != OpAMD64MOVQconst {
21052 break
21053 }
21054 c := auxIntToInt64(v_1.AuxInt)
21055 mem := v_2
21056 v.reset(OpAMD64SHLQconst)
21057 v.AuxInt = int8ToAuxInt(int8(c & 63))
21058 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21059 v0.AuxInt = int32ToAuxInt(off)
21060 v0.Aux = symToAux(sym)
21061 v0.AddArg2(ptr, mem)
21062 v.AddArg(v0)
21063 return true
21064 }
21065
21066
21067 for {
21068 off := auxIntToInt32(v.AuxInt)
21069 sym := auxToSym(v.Aux)
21070 ptr := v_0
21071 if v_1.Op != OpAMD64MOVLconst {
21072 break
21073 }
21074 c := auxIntToInt32(v_1.AuxInt)
21075 mem := v_2
21076 v.reset(OpAMD64SHLQconst)
21077 v.AuxInt = int8ToAuxInt(int8(c & 63))
21078 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21079 v0.AuxInt = int32ToAuxInt(off)
21080 v0.Aux = symToAux(sym)
21081 v0.AddArg2(ptr, mem)
21082 v.AddArg(v0)
21083 return true
21084 }
21085 return false
21086 }
21087 func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
21088 v_1 := v.Args[1]
21089 v_0 := v.Args[0]
21090
21091
21092
21093 for {
21094 x := v_0
21095 if v_1.Op != OpAMD64MOVQconst {
21096 break
21097 }
21098 c := auxIntToInt64(v_1.AuxInt)
21099 if !(c&31 < 8) {
21100 break
21101 }
21102 v.reset(OpAMD64SHRBconst)
21103 v.AuxInt = int8ToAuxInt(int8(c & 31))
21104 v.AddArg(x)
21105 return true
21106 }
21107
21108
21109
21110 for {
21111 x := v_0
21112 if v_1.Op != OpAMD64MOVLconst {
21113 break
21114 }
21115 c := auxIntToInt32(v_1.AuxInt)
21116 if !(c&31 < 8) {
21117 break
21118 }
21119 v.reset(OpAMD64SHRBconst)
21120 v.AuxInt = int8ToAuxInt(int8(c & 31))
21121 v.AddArg(x)
21122 return true
21123 }
21124
21125
21126
21127 for {
21128 if v_1.Op != OpAMD64MOVQconst {
21129 break
21130 }
21131 c := auxIntToInt64(v_1.AuxInt)
21132 if !(c&31 >= 8) {
21133 break
21134 }
21135 v.reset(OpAMD64MOVLconst)
21136 v.AuxInt = int32ToAuxInt(0)
21137 return true
21138 }
21139
21140
21141
21142 for {
21143 if v_1.Op != OpAMD64MOVLconst {
21144 break
21145 }
21146 c := auxIntToInt32(v_1.AuxInt)
21147 if !(c&31 >= 8) {
21148 break
21149 }
21150 v.reset(OpAMD64MOVLconst)
21151 v.AuxInt = int32ToAuxInt(0)
21152 return true
21153 }
21154 return false
21155 }
21156 func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
21157 v_0 := v.Args[0]
21158
21159
21160 for {
21161 if auxIntToInt8(v.AuxInt) != 0 {
21162 break
21163 }
21164 x := v_0
21165 v.copyOf(x)
21166 return true
21167 }
21168 return false
21169 }
21170 func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
21171 v_1 := v.Args[1]
21172 v_0 := v.Args[0]
21173 b := v.Block
21174
21175
21176 for {
21177 x := v_0
21178 if v_1.Op != OpAMD64MOVQconst {
21179 break
21180 }
21181 c := auxIntToInt64(v_1.AuxInt)
21182 v.reset(OpAMD64SHRLconst)
21183 v.AuxInt = int8ToAuxInt(int8(c & 31))
21184 v.AddArg(x)
21185 return true
21186 }
21187
21188
21189 for {
21190 x := v_0
21191 if v_1.Op != OpAMD64MOVLconst {
21192 break
21193 }
21194 c := auxIntToInt32(v_1.AuxInt)
21195 v.reset(OpAMD64SHRLconst)
21196 v.AuxInt = int8ToAuxInt(int8(c & 31))
21197 v.AddArg(x)
21198 return true
21199 }
21200
21201
21202
21203 for {
21204 x := v_0
21205 if v_1.Op != OpAMD64ADDQconst {
21206 break
21207 }
21208 c := auxIntToInt32(v_1.AuxInt)
21209 y := v_1.Args[0]
21210 if !(c&31 == 0) {
21211 break
21212 }
21213 v.reset(OpAMD64SHRL)
21214 v.AddArg2(x, y)
21215 return true
21216 }
21217
21218
21219
21220 for {
21221 x := v_0
21222 if v_1.Op != OpAMD64NEGQ {
21223 break
21224 }
21225 t := v_1.Type
21226 v_1_0 := v_1.Args[0]
21227 if v_1_0.Op != OpAMD64ADDQconst {
21228 break
21229 }
21230 c := auxIntToInt32(v_1_0.AuxInt)
21231 y := v_1_0.Args[0]
21232 if !(c&31 == 0) {
21233 break
21234 }
21235 v.reset(OpAMD64SHRL)
21236 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21237 v0.AddArg(y)
21238 v.AddArg2(x, v0)
21239 return true
21240 }
21241
21242
21243
21244 for {
21245 x := v_0
21246 if v_1.Op != OpAMD64ANDQconst {
21247 break
21248 }
21249 c := auxIntToInt32(v_1.AuxInt)
21250 y := v_1.Args[0]
21251 if !(c&31 == 31) {
21252 break
21253 }
21254 v.reset(OpAMD64SHRL)
21255 v.AddArg2(x, y)
21256 return true
21257 }
21258
21259
21260
21261 for {
21262 x := v_0
21263 if v_1.Op != OpAMD64NEGQ {
21264 break
21265 }
21266 t := v_1.Type
21267 v_1_0 := v_1.Args[0]
21268 if v_1_0.Op != OpAMD64ANDQconst {
21269 break
21270 }
21271 c := auxIntToInt32(v_1_0.AuxInt)
21272 y := v_1_0.Args[0]
21273 if !(c&31 == 31) {
21274 break
21275 }
21276 v.reset(OpAMD64SHRL)
21277 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21278 v0.AddArg(y)
21279 v.AddArg2(x, v0)
21280 return true
21281 }
21282
21283
21284
21285 for {
21286 x := v_0
21287 if v_1.Op != OpAMD64ADDLconst {
21288 break
21289 }
21290 c := auxIntToInt32(v_1.AuxInt)
21291 y := v_1.Args[0]
21292 if !(c&31 == 0) {
21293 break
21294 }
21295 v.reset(OpAMD64SHRL)
21296 v.AddArg2(x, y)
21297 return true
21298 }
21299
21300
21301
21302 for {
21303 x := v_0
21304 if v_1.Op != OpAMD64NEGL {
21305 break
21306 }
21307 t := v_1.Type
21308 v_1_0 := v_1.Args[0]
21309 if v_1_0.Op != OpAMD64ADDLconst {
21310 break
21311 }
21312 c := auxIntToInt32(v_1_0.AuxInt)
21313 y := v_1_0.Args[0]
21314 if !(c&31 == 0) {
21315 break
21316 }
21317 v.reset(OpAMD64SHRL)
21318 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21319 v0.AddArg(y)
21320 v.AddArg2(x, v0)
21321 return true
21322 }
21323
21324
21325
21326 for {
21327 x := v_0
21328 if v_1.Op != OpAMD64ANDLconst {
21329 break
21330 }
21331 c := auxIntToInt32(v_1.AuxInt)
21332 y := v_1.Args[0]
21333 if !(c&31 == 31) {
21334 break
21335 }
21336 v.reset(OpAMD64SHRL)
21337 v.AddArg2(x, y)
21338 return true
21339 }
21340
21341
21342
21343 for {
21344 x := v_0
21345 if v_1.Op != OpAMD64NEGL {
21346 break
21347 }
21348 t := v_1.Type
21349 v_1_0 := v_1.Args[0]
21350 if v_1_0.Op != OpAMD64ANDLconst {
21351 break
21352 }
21353 c := auxIntToInt32(v_1_0.AuxInt)
21354 y := v_1_0.Args[0]
21355 if !(c&31 == 31) {
21356 break
21357 }
21358 v.reset(OpAMD64SHRL)
21359 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21360 v0.AddArg(y)
21361 v.AddArg2(x, v0)
21362 return true
21363 }
21364
21365
21366
21367 for {
21368 l := v_0
21369 if l.Op != OpAMD64MOVLload {
21370 break
21371 }
21372 off := auxIntToInt32(l.AuxInt)
21373 sym := auxToSym(l.Aux)
21374 mem := l.Args[1]
21375 ptr := l.Args[0]
21376 x := v_1
21377 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
21378 break
21379 }
21380 v.reset(OpAMD64SHRXLload)
21381 v.AuxInt = int32ToAuxInt(off)
21382 v.Aux = symToAux(sym)
21383 v.AddArg3(ptr, x, mem)
21384 return true
21385 }
21386 return false
21387 }
21388 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
21389 v_0 := v.Args[0]
21390
21391
21392 for {
21393 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
21394 break
21395 }
21396 x := v_0.Args[0]
21397 v.reset(OpAMD64ANDLconst)
21398 v.AuxInt = int32ToAuxInt(0x7fffffff)
21399 v.AddArg(x)
21400 return true
21401 }
21402
21403
21404 for {
21405 if auxIntToInt8(v.AuxInt) != 0 {
21406 break
21407 }
21408 x := v_0
21409 v.copyOf(x)
21410 return true
21411 }
21412 return false
21413 }
21414 func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
21415 v_1 := v.Args[1]
21416 v_0 := v.Args[0]
21417 b := v.Block
21418
21419
21420 for {
21421 x := v_0
21422 if v_1.Op != OpAMD64MOVQconst {
21423 break
21424 }
21425 c := auxIntToInt64(v_1.AuxInt)
21426 v.reset(OpAMD64SHRQconst)
21427 v.AuxInt = int8ToAuxInt(int8(c & 63))
21428 v.AddArg(x)
21429 return true
21430 }
21431
21432
21433 for {
21434 x := v_0
21435 if v_1.Op != OpAMD64MOVLconst {
21436 break
21437 }
21438 c := auxIntToInt32(v_1.AuxInt)
21439 v.reset(OpAMD64SHRQconst)
21440 v.AuxInt = int8ToAuxInt(int8(c & 63))
21441 v.AddArg(x)
21442 return true
21443 }
21444
21445
21446
21447 for {
21448 x := v_0
21449 if v_1.Op != OpAMD64ADDQconst {
21450 break
21451 }
21452 c := auxIntToInt32(v_1.AuxInt)
21453 y := v_1.Args[0]
21454 if !(c&63 == 0) {
21455 break
21456 }
21457 v.reset(OpAMD64SHRQ)
21458 v.AddArg2(x, y)
21459 return true
21460 }
21461
21462
21463
21464 for {
21465 x := v_0
21466 if v_1.Op != OpAMD64NEGQ {
21467 break
21468 }
21469 t := v_1.Type
21470 v_1_0 := v_1.Args[0]
21471 if v_1_0.Op != OpAMD64ADDQconst {
21472 break
21473 }
21474 c := auxIntToInt32(v_1_0.AuxInt)
21475 y := v_1_0.Args[0]
21476 if !(c&63 == 0) {
21477 break
21478 }
21479 v.reset(OpAMD64SHRQ)
21480 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21481 v0.AddArg(y)
21482 v.AddArg2(x, v0)
21483 return true
21484 }
21485
21486
21487
21488 for {
21489 x := v_0
21490 if v_1.Op != OpAMD64ANDQconst {
21491 break
21492 }
21493 c := auxIntToInt32(v_1.AuxInt)
21494 y := v_1.Args[0]
21495 if !(c&63 == 63) {
21496 break
21497 }
21498 v.reset(OpAMD64SHRQ)
21499 v.AddArg2(x, y)
21500 return true
21501 }
21502
21503
21504
21505 for {
21506 x := v_0
21507 if v_1.Op != OpAMD64NEGQ {
21508 break
21509 }
21510 t := v_1.Type
21511 v_1_0 := v_1.Args[0]
21512 if v_1_0.Op != OpAMD64ANDQconst {
21513 break
21514 }
21515 c := auxIntToInt32(v_1_0.AuxInt)
21516 y := v_1_0.Args[0]
21517 if !(c&63 == 63) {
21518 break
21519 }
21520 v.reset(OpAMD64SHRQ)
21521 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21522 v0.AddArg(y)
21523 v.AddArg2(x, v0)
21524 return true
21525 }
21526
21527
21528
21529 for {
21530 x := v_0
21531 if v_1.Op != OpAMD64ADDLconst {
21532 break
21533 }
21534 c := auxIntToInt32(v_1.AuxInt)
21535 y := v_1.Args[0]
21536 if !(c&63 == 0) {
21537 break
21538 }
21539 v.reset(OpAMD64SHRQ)
21540 v.AddArg2(x, y)
21541 return true
21542 }
21543
21544
21545
21546 for {
21547 x := v_0
21548 if v_1.Op != OpAMD64NEGL {
21549 break
21550 }
21551 t := v_1.Type
21552 v_1_0 := v_1.Args[0]
21553 if v_1_0.Op != OpAMD64ADDLconst {
21554 break
21555 }
21556 c := auxIntToInt32(v_1_0.AuxInt)
21557 y := v_1_0.Args[0]
21558 if !(c&63 == 0) {
21559 break
21560 }
21561 v.reset(OpAMD64SHRQ)
21562 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21563 v0.AddArg(y)
21564 v.AddArg2(x, v0)
21565 return true
21566 }
21567
21568
21569
21570 for {
21571 x := v_0
21572 if v_1.Op != OpAMD64ANDLconst {
21573 break
21574 }
21575 c := auxIntToInt32(v_1.AuxInt)
21576 y := v_1.Args[0]
21577 if !(c&63 == 63) {
21578 break
21579 }
21580 v.reset(OpAMD64SHRQ)
21581 v.AddArg2(x, y)
21582 return true
21583 }
21584
21585
21586
21587 for {
21588 x := v_0
21589 if v_1.Op != OpAMD64NEGL {
21590 break
21591 }
21592 t := v_1.Type
21593 v_1_0 := v_1.Args[0]
21594 if v_1_0.Op != OpAMD64ANDLconst {
21595 break
21596 }
21597 c := auxIntToInt32(v_1_0.AuxInt)
21598 y := v_1_0.Args[0]
21599 if !(c&63 == 63) {
21600 break
21601 }
21602 v.reset(OpAMD64SHRQ)
21603 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21604 v0.AddArg(y)
21605 v.AddArg2(x, v0)
21606 return true
21607 }
21608
21609
21610
21611 for {
21612 l := v_0
21613 if l.Op != OpAMD64MOVQload {
21614 break
21615 }
21616 off := auxIntToInt32(l.AuxInt)
21617 sym := auxToSym(l.Aux)
21618 mem := l.Args[1]
21619 ptr := l.Args[0]
21620 x := v_1
21621 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
21622 break
21623 }
21624 v.reset(OpAMD64SHRXQload)
21625 v.AuxInt = int32ToAuxInt(off)
21626 v.Aux = symToAux(sym)
21627 v.AddArg3(ptr, x, mem)
21628 return true
21629 }
21630 return false
21631 }
21632 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
21633 v_0 := v.Args[0]
21634
21635
21636 for {
21637 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
21638 break
21639 }
21640 x := v_0.Args[0]
21641 v.reset(OpAMD64BTRQconst)
21642 v.AuxInt = int8ToAuxInt(63)
21643 v.AddArg(x)
21644 return true
21645 }
21646
21647
21648 for {
21649 if auxIntToInt8(v.AuxInt) != 0 {
21650 break
21651 }
21652 x := v_0
21653 v.copyOf(x)
21654 return true
21655 }
21656 return false
21657 }
21658 func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
21659 v_1 := v.Args[1]
21660 v_0 := v.Args[0]
21661
21662
21663
21664 for {
21665 x := v_0
21666 if v_1.Op != OpAMD64MOVQconst {
21667 break
21668 }
21669 c := auxIntToInt64(v_1.AuxInt)
21670 if !(c&31 < 16) {
21671 break
21672 }
21673 v.reset(OpAMD64SHRWconst)
21674 v.AuxInt = int8ToAuxInt(int8(c & 31))
21675 v.AddArg(x)
21676 return true
21677 }
21678
21679
21680
21681 for {
21682 x := v_0
21683 if v_1.Op != OpAMD64MOVLconst {
21684 break
21685 }
21686 c := auxIntToInt32(v_1.AuxInt)
21687 if !(c&31 < 16) {
21688 break
21689 }
21690 v.reset(OpAMD64SHRWconst)
21691 v.AuxInt = int8ToAuxInt(int8(c & 31))
21692 v.AddArg(x)
21693 return true
21694 }
21695
21696
21697
21698 for {
21699 if v_1.Op != OpAMD64MOVQconst {
21700 break
21701 }
21702 c := auxIntToInt64(v_1.AuxInt)
21703 if !(c&31 >= 16) {
21704 break
21705 }
21706 v.reset(OpAMD64MOVLconst)
21707 v.AuxInt = int32ToAuxInt(0)
21708 return true
21709 }
21710
21711
21712
21713 for {
21714 if v_1.Op != OpAMD64MOVLconst {
21715 break
21716 }
21717 c := auxIntToInt32(v_1.AuxInt)
21718 if !(c&31 >= 16) {
21719 break
21720 }
21721 v.reset(OpAMD64MOVLconst)
21722 v.AuxInt = int32ToAuxInt(0)
21723 return true
21724 }
21725 return false
21726 }
21727 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
21728 v_0 := v.Args[0]
21729
21730
21731 for {
21732 if auxIntToInt8(v.AuxInt) != 0 {
21733 break
21734 }
21735 x := v_0
21736 v.copyOf(x)
21737 return true
21738 }
21739 return false
21740 }
21741 func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
21742 v_2 := v.Args[2]
21743 v_1 := v.Args[1]
21744 v_0 := v.Args[0]
21745 b := v.Block
21746 typ := &b.Func.Config.Types
21747
21748
21749 for {
21750 off := auxIntToInt32(v.AuxInt)
21751 sym := auxToSym(v.Aux)
21752 ptr := v_0
21753 if v_1.Op != OpAMD64MOVLconst {
21754 break
21755 }
21756 c := auxIntToInt32(v_1.AuxInt)
21757 mem := v_2
21758 v.reset(OpAMD64SHRLconst)
21759 v.AuxInt = int8ToAuxInt(int8(c & 31))
21760 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
21761 v0.AuxInt = int32ToAuxInt(off)
21762 v0.Aux = symToAux(sym)
21763 v0.AddArg2(ptr, mem)
21764 v.AddArg(v0)
21765 return true
21766 }
21767 return false
21768 }
21769 func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
21770 v_2 := v.Args[2]
21771 v_1 := v.Args[1]
21772 v_0 := v.Args[0]
21773 b := v.Block
21774 typ := &b.Func.Config.Types
21775
21776
21777 for {
21778 off := auxIntToInt32(v.AuxInt)
21779 sym := auxToSym(v.Aux)
21780 ptr := v_0
21781 if v_1.Op != OpAMD64MOVQconst {
21782 break
21783 }
21784 c := auxIntToInt64(v_1.AuxInt)
21785 mem := v_2
21786 v.reset(OpAMD64SHRQconst)
21787 v.AuxInt = int8ToAuxInt(int8(c & 63))
21788 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21789 v0.AuxInt = int32ToAuxInt(off)
21790 v0.Aux = symToAux(sym)
21791 v0.AddArg2(ptr, mem)
21792 v.AddArg(v0)
21793 return true
21794 }
21795
21796
21797 for {
21798 off := auxIntToInt32(v.AuxInt)
21799 sym := auxToSym(v.Aux)
21800 ptr := v_0
21801 if v_1.Op != OpAMD64MOVLconst {
21802 break
21803 }
21804 c := auxIntToInt32(v_1.AuxInt)
21805 mem := v_2
21806 v.reset(OpAMD64SHRQconst)
21807 v.AuxInt = int8ToAuxInt(int8(c & 63))
21808 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21809 v0.AuxInt = int32ToAuxInt(off)
21810 v0.Aux = symToAux(sym)
21811 v0.AddArg2(ptr, mem)
21812 v.AddArg(v0)
21813 return true
21814 }
21815 return false
21816 }
21817 func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
21818 v_1 := v.Args[1]
21819 v_0 := v.Args[0]
21820 b := v.Block
21821
21822
21823 for {
21824 x := v_0
21825 if v_1.Op != OpAMD64MOVLconst {
21826 break
21827 }
21828 c := auxIntToInt32(v_1.AuxInt)
21829 v.reset(OpAMD64SUBLconst)
21830 v.AuxInt = int32ToAuxInt(c)
21831 v.AddArg(x)
21832 return true
21833 }
21834
21835
21836 for {
21837 if v_0.Op != OpAMD64MOVLconst {
21838 break
21839 }
21840 c := auxIntToInt32(v_0.AuxInt)
21841 x := v_1
21842 v.reset(OpAMD64NEGL)
21843 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
21844 v0.AuxInt = int32ToAuxInt(c)
21845 v0.AddArg(x)
21846 v.AddArg(v0)
21847 return true
21848 }
21849
21850
21851 for {
21852 x := v_0
21853 if x != v_1 {
21854 break
21855 }
21856 v.reset(OpAMD64MOVLconst)
21857 v.AuxInt = int32ToAuxInt(0)
21858 return true
21859 }
21860
21861
21862
21863 for {
21864 x := v_0
21865 l := v_1
21866 if l.Op != OpAMD64MOVLload {
21867 break
21868 }
21869 off := auxIntToInt32(l.AuxInt)
21870 sym := auxToSym(l.Aux)
21871 mem := l.Args[1]
21872 ptr := l.Args[0]
21873 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
21874 break
21875 }
21876 v.reset(OpAMD64SUBLload)
21877 v.AuxInt = int32ToAuxInt(off)
21878 v.Aux = symToAux(sym)
21879 v.AddArg3(x, ptr, mem)
21880 return true
21881 }
21882 return false
21883 }
21884 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
21885 v_0 := v.Args[0]
21886
21887
21888
21889 for {
21890 c := auxIntToInt32(v.AuxInt)
21891 x := v_0
21892 if !(c == 0) {
21893 break
21894 }
21895 v.copyOf(x)
21896 return true
21897 }
21898
21899
21900 for {
21901 c := auxIntToInt32(v.AuxInt)
21902 x := v_0
21903 v.reset(OpAMD64ADDLconst)
21904 v.AuxInt = int32ToAuxInt(-c)
21905 v.AddArg(x)
21906 return true
21907 }
21908 }
21909 func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
21910 v_2 := v.Args[2]
21911 v_1 := v.Args[1]
21912 v_0 := v.Args[0]
21913 b := v.Block
21914 typ := &b.Func.Config.Types
21915
21916
21917
21918 for {
21919 off1 := auxIntToInt32(v.AuxInt)
21920 sym := auxToSym(v.Aux)
21921 val := v_0
21922 if v_1.Op != OpAMD64ADDQconst {
21923 break
21924 }
21925 off2 := auxIntToInt32(v_1.AuxInt)
21926 base := v_1.Args[0]
21927 mem := v_2
21928 if !(is32Bit(int64(off1) + int64(off2))) {
21929 break
21930 }
21931 v.reset(OpAMD64SUBLload)
21932 v.AuxInt = int32ToAuxInt(off1 + off2)
21933 v.Aux = symToAux(sym)
21934 v.AddArg3(val, base, mem)
21935 return true
21936 }
21937
21938
21939
21940 for {
21941 off1 := auxIntToInt32(v.AuxInt)
21942 sym1 := auxToSym(v.Aux)
21943 val := v_0
21944 if v_1.Op != OpAMD64LEAQ {
21945 break
21946 }
21947 off2 := auxIntToInt32(v_1.AuxInt)
21948 sym2 := auxToSym(v_1.Aux)
21949 base := v_1.Args[0]
21950 mem := v_2
21951 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21952 break
21953 }
21954 v.reset(OpAMD64SUBLload)
21955 v.AuxInt = int32ToAuxInt(off1 + off2)
21956 v.Aux = symToAux(mergeSym(sym1, sym2))
21957 v.AddArg3(val, base, mem)
21958 return true
21959 }
21960
21961
21962 for {
21963 off := auxIntToInt32(v.AuxInt)
21964 sym := auxToSym(v.Aux)
21965 x := v_0
21966 ptr := v_1
21967 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
21968 break
21969 }
21970 y := v_2.Args[1]
21971 if ptr != v_2.Args[0] {
21972 break
21973 }
21974 v.reset(OpAMD64SUBL)
21975 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
21976 v0.AddArg(y)
21977 v.AddArg2(x, v0)
21978 return true
21979 }
21980 return false
21981 }
21982 func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
21983 v_2 := v.Args[2]
21984 v_1 := v.Args[1]
21985 v_0 := v.Args[0]
21986
21987
21988
21989 for {
21990 off1 := auxIntToInt32(v.AuxInt)
21991 sym := auxToSym(v.Aux)
21992 if v_0.Op != OpAMD64ADDQconst {
21993 break
21994 }
21995 off2 := auxIntToInt32(v_0.AuxInt)
21996 base := v_0.Args[0]
21997 val := v_1
21998 mem := v_2
21999 if !(is32Bit(int64(off1) + int64(off2))) {
22000 break
22001 }
22002 v.reset(OpAMD64SUBLmodify)
22003 v.AuxInt = int32ToAuxInt(off1 + off2)
22004 v.Aux = symToAux(sym)
22005 v.AddArg3(base, val, mem)
22006 return true
22007 }
22008
22009
22010
22011 for {
22012 off1 := auxIntToInt32(v.AuxInt)
22013 sym1 := auxToSym(v.Aux)
22014 if v_0.Op != OpAMD64LEAQ {
22015 break
22016 }
22017 off2 := auxIntToInt32(v_0.AuxInt)
22018 sym2 := auxToSym(v_0.Aux)
22019 base := v_0.Args[0]
22020 val := v_1
22021 mem := v_2
22022 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22023 break
22024 }
22025 v.reset(OpAMD64SUBLmodify)
22026 v.AuxInt = int32ToAuxInt(off1 + off2)
22027 v.Aux = symToAux(mergeSym(sym1, sym2))
22028 v.AddArg3(base, val, mem)
22029 return true
22030 }
22031 return false
22032 }
22033 func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
22034 v_1 := v.Args[1]
22035 v_0 := v.Args[0]
22036 b := v.Block
22037
22038
22039
22040 for {
22041 x := v_0
22042 if v_1.Op != OpAMD64MOVQconst {
22043 break
22044 }
22045 c := auxIntToInt64(v_1.AuxInt)
22046 if !(is32Bit(c)) {
22047 break
22048 }
22049 v.reset(OpAMD64SUBQconst)
22050 v.AuxInt = int32ToAuxInt(int32(c))
22051 v.AddArg(x)
22052 return true
22053 }
22054
22055
22056
22057 for {
22058 if v_0.Op != OpAMD64MOVQconst {
22059 break
22060 }
22061 c := auxIntToInt64(v_0.AuxInt)
22062 x := v_1
22063 if !(is32Bit(c)) {
22064 break
22065 }
22066 v.reset(OpAMD64NEGQ)
22067 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
22068 v0.AuxInt = int32ToAuxInt(int32(c))
22069 v0.AddArg(x)
22070 v.AddArg(v0)
22071 return true
22072 }
22073
22074
22075 for {
22076 x := v_0
22077 if x != v_1 {
22078 break
22079 }
22080 v.reset(OpAMD64MOVQconst)
22081 v.AuxInt = int64ToAuxInt(0)
22082 return true
22083 }
22084
22085
22086
22087 for {
22088 x := v_0
22089 l := v_1
22090 if l.Op != OpAMD64MOVQload {
22091 break
22092 }
22093 off := auxIntToInt32(l.AuxInt)
22094 sym := auxToSym(l.Aux)
22095 mem := l.Args[1]
22096 ptr := l.Args[0]
22097 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22098 break
22099 }
22100 v.reset(OpAMD64SUBQload)
22101 v.AuxInt = int32ToAuxInt(off)
22102 v.Aux = symToAux(sym)
22103 v.AddArg3(x, ptr, mem)
22104 return true
22105 }
22106 return false
22107 }
22108 func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
22109 v_1 := v.Args[1]
22110 v_0 := v.Args[0]
22111
22112
22113
22114 for {
22115 x := v_0
22116 if v_1.Op != OpAMD64MOVQconst {
22117 break
22118 }
22119 c := auxIntToInt64(v_1.AuxInt)
22120 if !(is32Bit(c)) {
22121 break
22122 }
22123 v.reset(OpAMD64SUBQconstborrow)
22124 v.AuxInt = int32ToAuxInt(int32(c))
22125 v.AddArg(x)
22126 return true
22127 }
22128 return false
22129 }
22130 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
22131 v_0 := v.Args[0]
22132
22133
22134 for {
22135 if auxIntToInt32(v.AuxInt) != 0 {
22136 break
22137 }
22138 x := v_0
22139 v.copyOf(x)
22140 return true
22141 }
22142
22143
22144
22145 for {
22146 c := auxIntToInt32(v.AuxInt)
22147 x := v_0
22148 if !(c != -(1 << 31)) {
22149 break
22150 }
22151 v.reset(OpAMD64ADDQconst)
22152 v.AuxInt = int32ToAuxInt(-c)
22153 v.AddArg(x)
22154 return true
22155 }
22156
22157
22158 for {
22159 c := auxIntToInt32(v.AuxInt)
22160 if v_0.Op != OpAMD64MOVQconst {
22161 break
22162 }
22163 d := auxIntToInt64(v_0.AuxInt)
22164 v.reset(OpAMD64MOVQconst)
22165 v.AuxInt = int64ToAuxInt(d - int64(c))
22166 return true
22167 }
22168
22169
22170
22171 for {
22172 c := auxIntToInt32(v.AuxInt)
22173 if v_0.Op != OpAMD64SUBQconst {
22174 break
22175 }
22176 d := auxIntToInt32(v_0.AuxInt)
22177 x := v_0.Args[0]
22178 if !(is32Bit(int64(-c) - int64(d))) {
22179 break
22180 }
22181 v.reset(OpAMD64ADDQconst)
22182 v.AuxInt = int32ToAuxInt(-c - d)
22183 v.AddArg(x)
22184 return true
22185 }
22186 return false
22187 }
22188 func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
22189 v_2 := v.Args[2]
22190 v_1 := v.Args[1]
22191 v_0 := v.Args[0]
22192 b := v.Block
22193 typ := &b.Func.Config.Types
22194
22195
22196
22197 for {
22198 off1 := auxIntToInt32(v.AuxInt)
22199 sym := auxToSym(v.Aux)
22200 val := v_0
22201 if v_1.Op != OpAMD64ADDQconst {
22202 break
22203 }
22204 off2 := auxIntToInt32(v_1.AuxInt)
22205 base := v_1.Args[0]
22206 mem := v_2
22207 if !(is32Bit(int64(off1) + int64(off2))) {
22208 break
22209 }
22210 v.reset(OpAMD64SUBQload)
22211 v.AuxInt = int32ToAuxInt(off1 + off2)
22212 v.Aux = symToAux(sym)
22213 v.AddArg3(val, base, mem)
22214 return true
22215 }
22216
22217
22218
22219 for {
22220 off1 := auxIntToInt32(v.AuxInt)
22221 sym1 := auxToSym(v.Aux)
22222 val := v_0
22223 if v_1.Op != OpAMD64LEAQ {
22224 break
22225 }
22226 off2 := auxIntToInt32(v_1.AuxInt)
22227 sym2 := auxToSym(v_1.Aux)
22228 base := v_1.Args[0]
22229 mem := v_2
22230 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22231 break
22232 }
22233 v.reset(OpAMD64SUBQload)
22234 v.AuxInt = int32ToAuxInt(off1 + off2)
22235 v.Aux = symToAux(mergeSym(sym1, sym2))
22236 v.AddArg3(val, base, mem)
22237 return true
22238 }
22239
22240
22241 for {
22242 off := auxIntToInt32(v.AuxInt)
22243 sym := auxToSym(v.Aux)
22244 x := v_0
22245 ptr := v_1
22246 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22247 break
22248 }
22249 y := v_2.Args[1]
22250 if ptr != v_2.Args[0] {
22251 break
22252 }
22253 v.reset(OpAMD64SUBQ)
22254 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
22255 v0.AddArg(y)
22256 v.AddArg2(x, v0)
22257 return true
22258 }
22259 return false
22260 }
22261 func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
22262 v_2 := v.Args[2]
22263 v_1 := v.Args[1]
22264 v_0 := v.Args[0]
22265
22266
22267
22268 for {
22269 off1 := auxIntToInt32(v.AuxInt)
22270 sym := auxToSym(v.Aux)
22271 if v_0.Op != OpAMD64ADDQconst {
22272 break
22273 }
22274 off2 := auxIntToInt32(v_0.AuxInt)
22275 base := v_0.Args[0]
22276 val := v_1
22277 mem := v_2
22278 if !(is32Bit(int64(off1) + int64(off2))) {
22279 break
22280 }
22281 v.reset(OpAMD64SUBQmodify)
22282 v.AuxInt = int32ToAuxInt(off1 + off2)
22283 v.Aux = symToAux(sym)
22284 v.AddArg3(base, val, mem)
22285 return true
22286 }
22287
22288
22289
22290 for {
22291 off1 := auxIntToInt32(v.AuxInt)
22292 sym1 := auxToSym(v.Aux)
22293 if v_0.Op != OpAMD64LEAQ {
22294 break
22295 }
22296 off2 := auxIntToInt32(v_0.AuxInt)
22297 sym2 := auxToSym(v_0.Aux)
22298 base := v_0.Args[0]
22299 val := v_1
22300 mem := v_2
22301 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22302 break
22303 }
22304 v.reset(OpAMD64SUBQmodify)
22305 v.AuxInt = int32ToAuxInt(off1 + off2)
22306 v.Aux = symToAux(mergeSym(sym1, sym2))
22307 v.AddArg3(base, val, mem)
22308 return true
22309 }
22310 return false
22311 }
22312 func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
22313 v_1 := v.Args[1]
22314 v_0 := v.Args[0]
22315
22316
22317
22318 for {
22319 x := v_0
22320 l := v_1
22321 if l.Op != OpAMD64MOVSDload {
22322 break
22323 }
22324 off := auxIntToInt32(l.AuxInt)
22325 sym := auxToSym(l.Aux)
22326 mem := l.Args[1]
22327 ptr := l.Args[0]
22328 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22329 break
22330 }
22331 v.reset(OpAMD64SUBSDload)
22332 v.AuxInt = int32ToAuxInt(off)
22333 v.Aux = symToAux(sym)
22334 v.AddArg3(x, ptr, mem)
22335 return true
22336 }
22337 return false
22338 }
22339 func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
22340 v_2 := v.Args[2]
22341 v_1 := v.Args[1]
22342 v_0 := v.Args[0]
22343 b := v.Block
22344 typ := &b.Func.Config.Types
22345
22346
22347
22348 for {
22349 off1 := auxIntToInt32(v.AuxInt)
22350 sym := auxToSym(v.Aux)
22351 val := v_0
22352 if v_1.Op != OpAMD64ADDQconst {
22353 break
22354 }
22355 off2 := auxIntToInt32(v_1.AuxInt)
22356 base := v_1.Args[0]
22357 mem := v_2
22358 if !(is32Bit(int64(off1) + int64(off2))) {
22359 break
22360 }
22361 v.reset(OpAMD64SUBSDload)
22362 v.AuxInt = int32ToAuxInt(off1 + off2)
22363 v.Aux = symToAux(sym)
22364 v.AddArg3(val, base, mem)
22365 return true
22366 }
22367
22368
22369
22370 for {
22371 off1 := auxIntToInt32(v.AuxInt)
22372 sym1 := auxToSym(v.Aux)
22373 val := v_0
22374 if v_1.Op != OpAMD64LEAQ {
22375 break
22376 }
22377 off2 := auxIntToInt32(v_1.AuxInt)
22378 sym2 := auxToSym(v_1.Aux)
22379 base := v_1.Args[0]
22380 mem := v_2
22381 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22382 break
22383 }
22384 v.reset(OpAMD64SUBSDload)
22385 v.AuxInt = int32ToAuxInt(off1 + off2)
22386 v.Aux = symToAux(mergeSym(sym1, sym2))
22387 v.AddArg3(val, base, mem)
22388 return true
22389 }
22390
22391
22392 for {
22393 off := auxIntToInt32(v.AuxInt)
22394 sym := auxToSym(v.Aux)
22395 x := v_0
22396 ptr := v_1
22397 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22398 break
22399 }
22400 y := v_2.Args[1]
22401 if ptr != v_2.Args[0] {
22402 break
22403 }
22404 v.reset(OpAMD64SUBSD)
22405 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
22406 v0.AddArg(y)
22407 v.AddArg2(x, v0)
22408 return true
22409 }
22410 return false
22411 }
22412 func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
22413 v_1 := v.Args[1]
22414 v_0 := v.Args[0]
22415
22416
22417
22418 for {
22419 x := v_0
22420 l := v_1
22421 if l.Op != OpAMD64MOVSSload {
22422 break
22423 }
22424 off := auxIntToInt32(l.AuxInt)
22425 sym := auxToSym(l.Aux)
22426 mem := l.Args[1]
22427 ptr := l.Args[0]
22428 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22429 break
22430 }
22431 v.reset(OpAMD64SUBSSload)
22432 v.AuxInt = int32ToAuxInt(off)
22433 v.Aux = symToAux(sym)
22434 v.AddArg3(x, ptr, mem)
22435 return true
22436 }
22437 return false
22438 }
22439 func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
22440 v_2 := v.Args[2]
22441 v_1 := v.Args[1]
22442 v_0 := v.Args[0]
22443 b := v.Block
22444 typ := &b.Func.Config.Types
22445
22446
22447
22448 for {
22449 off1 := auxIntToInt32(v.AuxInt)
22450 sym := auxToSym(v.Aux)
22451 val := v_0
22452 if v_1.Op != OpAMD64ADDQconst {
22453 break
22454 }
22455 off2 := auxIntToInt32(v_1.AuxInt)
22456 base := v_1.Args[0]
22457 mem := v_2
22458 if !(is32Bit(int64(off1) + int64(off2))) {
22459 break
22460 }
22461 v.reset(OpAMD64SUBSSload)
22462 v.AuxInt = int32ToAuxInt(off1 + off2)
22463 v.Aux = symToAux(sym)
22464 v.AddArg3(val, base, mem)
22465 return true
22466 }
22467
22468
22469
22470 for {
22471 off1 := auxIntToInt32(v.AuxInt)
22472 sym1 := auxToSym(v.Aux)
22473 val := v_0
22474 if v_1.Op != OpAMD64LEAQ {
22475 break
22476 }
22477 off2 := auxIntToInt32(v_1.AuxInt)
22478 sym2 := auxToSym(v_1.Aux)
22479 base := v_1.Args[0]
22480 mem := v_2
22481 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22482 break
22483 }
22484 v.reset(OpAMD64SUBSSload)
22485 v.AuxInt = int32ToAuxInt(off1 + off2)
22486 v.Aux = symToAux(mergeSym(sym1, sym2))
22487 v.AddArg3(val, base, mem)
22488 return true
22489 }
22490
22491
22492 for {
22493 off := auxIntToInt32(v.AuxInt)
22494 sym := auxToSym(v.Aux)
22495 x := v_0
22496 ptr := v_1
22497 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22498 break
22499 }
22500 y := v_2.Args[1]
22501 if ptr != v_2.Args[0] {
22502 break
22503 }
22504 v.reset(OpAMD64SUBSS)
22505 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
22506 v0.AddArg(y)
22507 v.AddArg2(x, v0)
22508 return true
22509 }
22510 return false
22511 }
22512 func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
22513 v_1 := v.Args[1]
22514 v_0 := v.Args[0]
22515 b := v.Block
22516
22517
22518 for {
22519 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22520 if v_0.Op != OpAMD64MOVLconst {
22521 continue
22522 }
22523 c := auxIntToInt32(v_0.AuxInt)
22524 x := v_1
22525 v.reset(OpAMD64TESTBconst)
22526 v.AuxInt = int8ToAuxInt(int8(c))
22527 v.AddArg(x)
22528 return true
22529 }
22530 break
22531 }
22532
22533
22534
22535 for {
22536 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22537 l := v_0
22538 if l.Op != OpAMD64MOVBload {
22539 continue
22540 }
22541 off := auxIntToInt32(l.AuxInt)
22542 sym := auxToSym(l.Aux)
22543 mem := l.Args[1]
22544 ptr := l.Args[0]
22545 l2 := v_1
22546 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22547 continue
22548 }
22549 b = l.Block
22550 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
22551 v.copyOf(v0)
22552 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22553 v0.Aux = symToAux(sym)
22554 v0.AddArg2(ptr, mem)
22555 return true
22556 }
22557 break
22558 }
22559 return false
22560 }
22561 func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
22562 v_0 := v.Args[0]
22563
22564
22565
22566 for {
22567 if auxIntToInt8(v.AuxInt) != -1 {
22568 break
22569 }
22570 x := v_0
22571 if !(x.Op != OpAMD64MOVLconst) {
22572 break
22573 }
22574 v.reset(OpAMD64TESTB)
22575 v.AddArg2(x, x)
22576 return true
22577 }
22578 return false
22579 }
22580 func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
22581 v_1 := v.Args[1]
22582 v_0 := v.Args[0]
22583 b := v.Block
22584
22585
22586 for {
22587 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22588 if v_0.Op != OpAMD64MOVLconst {
22589 continue
22590 }
22591 c := auxIntToInt32(v_0.AuxInt)
22592 x := v_1
22593 v.reset(OpAMD64TESTLconst)
22594 v.AuxInt = int32ToAuxInt(c)
22595 v.AddArg(x)
22596 return true
22597 }
22598 break
22599 }
22600
22601
22602
22603 for {
22604 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22605 l := v_0
22606 if l.Op != OpAMD64MOVLload {
22607 continue
22608 }
22609 off := auxIntToInt32(l.AuxInt)
22610 sym := auxToSym(l.Aux)
22611 mem := l.Args[1]
22612 ptr := l.Args[0]
22613 l2 := v_1
22614 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22615 continue
22616 }
22617 b = l.Block
22618 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
22619 v.copyOf(v0)
22620 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22621 v0.Aux = symToAux(sym)
22622 v0.AddArg2(ptr, mem)
22623 return true
22624 }
22625 break
22626 }
22627
22628
22629
22630 for {
22631 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22632 a := v_0
22633 if a.Op != OpAMD64ANDLload {
22634 continue
22635 }
22636 off := auxIntToInt32(a.AuxInt)
22637 sym := auxToSym(a.Aux)
22638 mem := a.Args[2]
22639 x := a.Args[0]
22640 ptr := a.Args[1]
22641 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22642 continue
22643 }
22644 v.reset(OpAMD64TESTL)
22645 v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
22646 v0.AuxInt = int32ToAuxInt(off)
22647 v0.Aux = symToAux(sym)
22648 v0.AddArg2(ptr, mem)
22649 v.AddArg2(v0, x)
22650 return true
22651 }
22652 break
22653 }
22654 return false
22655 }
22656 func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
22657 v_0 := v.Args[0]
22658
22659
22660
22661 for {
22662 c := auxIntToInt32(v.AuxInt)
22663 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
22664 break
22665 }
22666 v.reset(OpAMD64FlagEQ)
22667 return true
22668 }
22669
22670
22671
22672 for {
22673 c := auxIntToInt32(v.AuxInt)
22674 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
22675 break
22676 }
22677 v.reset(OpAMD64FlagLT_UGT)
22678 return true
22679 }
22680
22681
22682
22683 for {
22684 c := auxIntToInt32(v.AuxInt)
22685 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
22686 break
22687 }
22688 v.reset(OpAMD64FlagGT_UGT)
22689 return true
22690 }
22691
22692
22693
22694 for {
22695 if auxIntToInt32(v.AuxInt) != -1 {
22696 break
22697 }
22698 x := v_0
22699 if !(x.Op != OpAMD64MOVLconst) {
22700 break
22701 }
22702 v.reset(OpAMD64TESTL)
22703 v.AddArg2(x, x)
22704 return true
22705 }
22706 return false
22707 }
22708 func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
22709 v_1 := v.Args[1]
22710 v_0 := v.Args[0]
22711 b := v.Block
22712
22713
22714
22715 for {
22716 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22717 if v_0.Op != OpAMD64MOVQconst {
22718 continue
22719 }
22720 c := auxIntToInt64(v_0.AuxInt)
22721 x := v_1
22722 if !(is32Bit(c)) {
22723 continue
22724 }
22725 v.reset(OpAMD64TESTQconst)
22726 v.AuxInt = int32ToAuxInt(int32(c))
22727 v.AddArg(x)
22728 return true
22729 }
22730 break
22731 }
22732
22733
22734
22735 for {
22736 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22737 l := v_0
22738 if l.Op != OpAMD64MOVQload {
22739 continue
22740 }
22741 off := auxIntToInt32(l.AuxInt)
22742 sym := auxToSym(l.Aux)
22743 mem := l.Args[1]
22744 ptr := l.Args[0]
22745 l2 := v_1
22746 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22747 continue
22748 }
22749 b = l.Block
22750 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
22751 v.copyOf(v0)
22752 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22753 v0.Aux = symToAux(sym)
22754 v0.AddArg2(ptr, mem)
22755 return true
22756 }
22757 break
22758 }
22759
22760
22761
22762 for {
22763 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22764 a := v_0
22765 if a.Op != OpAMD64ANDQload {
22766 continue
22767 }
22768 off := auxIntToInt32(a.AuxInt)
22769 sym := auxToSym(a.Aux)
22770 mem := a.Args[2]
22771 x := a.Args[0]
22772 ptr := a.Args[1]
22773 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22774 continue
22775 }
22776 v.reset(OpAMD64TESTQ)
22777 v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
22778 v0.AuxInt = int32ToAuxInt(off)
22779 v0.Aux = symToAux(sym)
22780 v0.AddArg2(ptr, mem)
22781 v.AddArg2(v0, x)
22782 return true
22783 }
22784 break
22785 }
22786 return false
22787 }
22788 func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
22789 v_0 := v.Args[0]
22790
22791
22792
22793 for {
22794 c := auxIntToInt32(v.AuxInt)
22795 if v_0.Op != OpAMD64MOVQconst {
22796 break
22797 }
22798 d := auxIntToInt64(v_0.AuxInt)
22799 if !(int64(c) == d && c == 0) {
22800 break
22801 }
22802 v.reset(OpAMD64FlagEQ)
22803 return true
22804 }
22805
22806
22807
22808 for {
22809 c := auxIntToInt32(v.AuxInt)
22810 if v_0.Op != OpAMD64MOVQconst {
22811 break
22812 }
22813 d := auxIntToInt64(v_0.AuxInt)
22814 if !(int64(c) == d && c < 0) {
22815 break
22816 }
22817 v.reset(OpAMD64FlagLT_UGT)
22818 return true
22819 }
22820
22821
22822
22823 for {
22824 c := auxIntToInt32(v.AuxInt)
22825 if v_0.Op != OpAMD64MOVQconst {
22826 break
22827 }
22828 d := auxIntToInt64(v_0.AuxInt)
22829 if !(int64(c) == d && c > 0) {
22830 break
22831 }
22832 v.reset(OpAMD64FlagGT_UGT)
22833 return true
22834 }
22835
22836
22837
22838 for {
22839 if auxIntToInt32(v.AuxInt) != -1 {
22840 break
22841 }
22842 x := v_0
22843 if !(x.Op != OpAMD64MOVQconst) {
22844 break
22845 }
22846 v.reset(OpAMD64TESTQ)
22847 v.AddArg2(x, x)
22848 return true
22849 }
22850 return false
22851 }
22852 func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
22853 v_1 := v.Args[1]
22854 v_0 := v.Args[0]
22855 b := v.Block
22856
22857
22858 for {
22859 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22860 if v_0.Op != OpAMD64MOVLconst {
22861 continue
22862 }
22863 c := auxIntToInt32(v_0.AuxInt)
22864 x := v_1
22865 v.reset(OpAMD64TESTWconst)
22866 v.AuxInt = int16ToAuxInt(int16(c))
22867 v.AddArg(x)
22868 return true
22869 }
22870 break
22871 }
22872
22873
22874
22875 for {
22876 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22877 l := v_0
22878 if l.Op != OpAMD64MOVWload {
22879 continue
22880 }
22881 off := auxIntToInt32(l.AuxInt)
22882 sym := auxToSym(l.Aux)
22883 mem := l.Args[1]
22884 ptr := l.Args[0]
22885 l2 := v_1
22886 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22887 continue
22888 }
22889 b = l.Block
22890 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
22891 v.copyOf(v0)
22892 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22893 v0.Aux = symToAux(sym)
22894 v0.AddArg2(ptr, mem)
22895 return true
22896 }
22897 break
22898 }
22899 return false
22900 }
22901 func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
22902 v_0 := v.Args[0]
22903
22904
22905
22906 for {
22907 if auxIntToInt16(v.AuxInt) != -1 {
22908 break
22909 }
22910 x := v_0
22911 if !(x.Op != OpAMD64MOVLconst) {
22912 break
22913 }
22914 v.reset(OpAMD64TESTW)
22915 v.AddArg2(x, x)
22916 return true
22917 }
22918 return false
22919 }
22920 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
22921 v_2 := v.Args[2]
22922 v_1 := v.Args[1]
22923 v_0 := v.Args[0]
22924
22925
22926
22927 for {
22928 off1 := auxIntToInt32(v.AuxInt)
22929 sym := auxToSym(v.Aux)
22930 val := v_0
22931 if v_1.Op != OpAMD64ADDQconst {
22932 break
22933 }
22934 off2 := auxIntToInt32(v_1.AuxInt)
22935 ptr := v_1.Args[0]
22936 mem := v_2
22937 if !(is32Bit(int64(off1) + int64(off2))) {
22938 break
22939 }
22940 v.reset(OpAMD64XADDLlock)
22941 v.AuxInt = int32ToAuxInt(off1 + off2)
22942 v.Aux = symToAux(sym)
22943 v.AddArg3(val, ptr, mem)
22944 return true
22945 }
22946 return false
22947 }
22948 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
22949 v_2 := v.Args[2]
22950 v_1 := v.Args[1]
22951 v_0 := v.Args[0]
22952
22953
22954
22955 for {
22956 off1 := auxIntToInt32(v.AuxInt)
22957 sym := auxToSym(v.Aux)
22958 val := v_0
22959 if v_1.Op != OpAMD64ADDQconst {
22960 break
22961 }
22962 off2 := auxIntToInt32(v_1.AuxInt)
22963 ptr := v_1.Args[0]
22964 mem := v_2
22965 if !(is32Bit(int64(off1) + int64(off2))) {
22966 break
22967 }
22968 v.reset(OpAMD64XADDQlock)
22969 v.AuxInt = int32ToAuxInt(off1 + off2)
22970 v.Aux = symToAux(sym)
22971 v.AddArg3(val, ptr, mem)
22972 return true
22973 }
22974 return false
22975 }
22976 func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
22977 v_2 := v.Args[2]
22978 v_1 := v.Args[1]
22979 v_0 := v.Args[0]
22980
22981
22982
22983 for {
22984 off1 := auxIntToInt32(v.AuxInt)
22985 sym := auxToSym(v.Aux)
22986 val := v_0
22987 if v_1.Op != OpAMD64ADDQconst {
22988 break
22989 }
22990 off2 := auxIntToInt32(v_1.AuxInt)
22991 ptr := v_1.Args[0]
22992 mem := v_2
22993 if !(is32Bit(int64(off1) + int64(off2))) {
22994 break
22995 }
22996 v.reset(OpAMD64XCHGL)
22997 v.AuxInt = int32ToAuxInt(off1 + off2)
22998 v.Aux = symToAux(sym)
22999 v.AddArg3(val, ptr, mem)
23000 return true
23001 }
23002
23003
23004
23005 for {
23006 off1 := auxIntToInt32(v.AuxInt)
23007 sym1 := auxToSym(v.Aux)
23008 val := v_0
23009 if v_1.Op != OpAMD64LEAQ {
23010 break
23011 }
23012 off2 := auxIntToInt32(v_1.AuxInt)
23013 sym2 := auxToSym(v_1.Aux)
23014 ptr := v_1.Args[0]
23015 mem := v_2
23016 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
23017 break
23018 }
23019 v.reset(OpAMD64XCHGL)
23020 v.AuxInt = int32ToAuxInt(off1 + off2)
23021 v.Aux = symToAux(mergeSym(sym1, sym2))
23022 v.AddArg3(val, ptr, mem)
23023 return true
23024 }
23025 return false
23026 }
23027 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
23028 v_2 := v.Args[2]
23029 v_1 := v.Args[1]
23030 v_0 := v.Args[0]
23031
23032
23033
23034 for {
23035 off1 := auxIntToInt32(v.AuxInt)
23036 sym := auxToSym(v.Aux)
23037 val := v_0
23038 if v_1.Op != OpAMD64ADDQconst {
23039 break
23040 }
23041 off2 := auxIntToInt32(v_1.AuxInt)
23042 ptr := v_1.Args[0]
23043 mem := v_2
23044 if !(is32Bit(int64(off1) + int64(off2))) {
23045 break
23046 }
23047 v.reset(OpAMD64XCHGQ)
23048 v.AuxInt = int32ToAuxInt(off1 + off2)
23049 v.Aux = symToAux(sym)
23050 v.AddArg3(val, ptr, mem)
23051 return true
23052 }
23053
23054
23055
23056 for {
23057 off1 := auxIntToInt32(v.AuxInt)
23058 sym1 := auxToSym(v.Aux)
23059 val := v_0
23060 if v_1.Op != OpAMD64LEAQ {
23061 break
23062 }
23063 off2 := auxIntToInt32(v_1.AuxInt)
23064 sym2 := auxToSym(v_1.Aux)
23065 ptr := v_1.Args[0]
23066 mem := v_2
23067 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
23068 break
23069 }
23070 v.reset(OpAMD64XCHGQ)
23071 v.AuxInt = int32ToAuxInt(off1 + off2)
23072 v.Aux = symToAux(mergeSym(sym1, sym2))
23073 v.AddArg3(val, ptr, mem)
23074 return true
23075 }
23076 return false
23077 }
23078 func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
23079 v_1 := v.Args[1]
23080 v_0 := v.Args[0]
23081
23082
23083 for {
23084 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23085 if v_0.Op != OpAMD64SHLL {
23086 continue
23087 }
23088 y := v_0.Args[1]
23089 v_0_0 := v_0.Args[0]
23090 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
23091 continue
23092 }
23093 x := v_1
23094 v.reset(OpAMD64BTCL)
23095 v.AddArg2(x, y)
23096 return true
23097 }
23098 break
23099 }
23100
23101
23102 for {
23103 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23104 x := v_0
23105 if v_1.Op != OpAMD64MOVLconst {
23106 continue
23107 }
23108 c := auxIntToInt32(v_1.AuxInt)
23109 v.reset(OpAMD64XORLconst)
23110 v.AuxInt = int32ToAuxInt(c)
23111 v.AddArg(x)
23112 return true
23113 }
23114 break
23115 }
23116
23117
23118 for {
23119 x := v_0
23120 if x != v_1 {
23121 break
23122 }
23123 v.reset(OpAMD64MOVLconst)
23124 v.AuxInt = int32ToAuxInt(0)
23125 return true
23126 }
23127
23128
23129
23130 for {
23131 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23132 x := v_0
23133 l := v_1
23134 if l.Op != OpAMD64MOVLload {
23135 continue
23136 }
23137 off := auxIntToInt32(l.AuxInt)
23138 sym := auxToSym(l.Aux)
23139 mem := l.Args[1]
23140 ptr := l.Args[0]
23141 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23142 continue
23143 }
23144 v.reset(OpAMD64XORLload)
23145 v.AuxInt = int32ToAuxInt(off)
23146 v.Aux = symToAux(sym)
23147 v.AddArg3(x, ptr, mem)
23148 return true
23149 }
23150 break
23151 }
23152
23153
23154
23155 for {
23156 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23157 x := v_0
23158 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23159 continue
23160 }
23161 v.reset(OpAMD64BLSMSKL)
23162 v.AddArg(x)
23163 return true
23164 }
23165 break
23166 }
23167 return false
23168 }
23169 func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
23170 v_0 := v.Args[0]
23171
23172
23173 for {
23174 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
23175 break
23176 }
23177 x := v_0.Args[0]
23178 v.reset(OpAMD64SETEQ)
23179 v.AddArg(x)
23180 return true
23181 }
23182
23183
23184 for {
23185 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
23186 break
23187 }
23188 x := v_0.Args[0]
23189 v.reset(OpAMD64SETNE)
23190 v.AddArg(x)
23191 return true
23192 }
23193
23194
23195 for {
23196 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
23197 break
23198 }
23199 x := v_0.Args[0]
23200 v.reset(OpAMD64SETGE)
23201 v.AddArg(x)
23202 return true
23203 }
23204
23205
23206 for {
23207 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
23208 break
23209 }
23210 x := v_0.Args[0]
23211 v.reset(OpAMD64SETL)
23212 v.AddArg(x)
23213 return true
23214 }
23215
23216
23217 for {
23218 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
23219 break
23220 }
23221 x := v_0.Args[0]
23222 v.reset(OpAMD64SETG)
23223 v.AddArg(x)
23224 return true
23225 }
23226
23227
23228 for {
23229 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
23230 break
23231 }
23232 x := v_0.Args[0]
23233 v.reset(OpAMD64SETLE)
23234 v.AddArg(x)
23235 return true
23236 }
23237
23238
23239 for {
23240 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
23241 break
23242 }
23243 x := v_0.Args[0]
23244 v.reset(OpAMD64SETAE)
23245 v.AddArg(x)
23246 return true
23247 }
23248
23249
23250 for {
23251 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
23252 break
23253 }
23254 x := v_0.Args[0]
23255 v.reset(OpAMD64SETB)
23256 v.AddArg(x)
23257 return true
23258 }
23259
23260
23261 for {
23262 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
23263 break
23264 }
23265 x := v_0.Args[0]
23266 v.reset(OpAMD64SETA)
23267 v.AddArg(x)
23268 return true
23269 }
23270
23271
23272 for {
23273 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
23274 break
23275 }
23276 x := v_0.Args[0]
23277 v.reset(OpAMD64SETBE)
23278 v.AddArg(x)
23279 return true
23280 }
23281
23282
23283 for {
23284 c := auxIntToInt32(v.AuxInt)
23285 if v_0.Op != OpAMD64XORLconst {
23286 break
23287 }
23288 d := auxIntToInt32(v_0.AuxInt)
23289 x := v_0.Args[0]
23290 v.reset(OpAMD64XORLconst)
23291 v.AuxInt = int32ToAuxInt(c ^ d)
23292 v.AddArg(x)
23293 return true
23294 }
23295
23296
23297
23298 for {
23299 c := auxIntToInt32(v.AuxInt)
23300 x := v_0
23301 if !(c == 0) {
23302 break
23303 }
23304 v.copyOf(x)
23305 return true
23306 }
23307
23308
23309 for {
23310 c := auxIntToInt32(v.AuxInt)
23311 if v_0.Op != OpAMD64MOVLconst {
23312 break
23313 }
23314 d := auxIntToInt32(v_0.AuxInt)
23315 v.reset(OpAMD64MOVLconst)
23316 v.AuxInt = int32ToAuxInt(c ^ d)
23317 return true
23318 }
23319 return false
23320 }
23321 func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
23322 v_1 := v.Args[1]
23323 v_0 := v.Args[0]
23324
23325
23326
23327 for {
23328 valoff1 := auxIntToValAndOff(v.AuxInt)
23329 sym := auxToSym(v.Aux)
23330 if v_0.Op != OpAMD64ADDQconst {
23331 break
23332 }
23333 off2 := auxIntToInt32(v_0.AuxInt)
23334 base := v_0.Args[0]
23335 mem := v_1
23336 if !(ValAndOff(valoff1).canAdd32(off2)) {
23337 break
23338 }
23339 v.reset(OpAMD64XORLconstmodify)
23340 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23341 v.Aux = symToAux(sym)
23342 v.AddArg2(base, mem)
23343 return true
23344 }
23345
23346
23347
23348 for {
23349 valoff1 := auxIntToValAndOff(v.AuxInt)
23350 sym1 := auxToSym(v.Aux)
23351 if v_0.Op != OpAMD64LEAQ {
23352 break
23353 }
23354 off2 := auxIntToInt32(v_0.AuxInt)
23355 sym2 := auxToSym(v_0.Aux)
23356 base := v_0.Args[0]
23357 mem := v_1
23358 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23359 break
23360 }
23361 v.reset(OpAMD64XORLconstmodify)
23362 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23363 v.Aux = symToAux(mergeSym(sym1, sym2))
23364 v.AddArg2(base, mem)
23365 return true
23366 }
23367 return false
23368 }
23369 func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
23370 v_2 := v.Args[2]
23371 v_1 := v.Args[1]
23372 v_0 := v.Args[0]
23373 b := v.Block
23374 typ := &b.Func.Config.Types
23375
23376
23377
23378 for {
23379 off1 := auxIntToInt32(v.AuxInt)
23380 sym := auxToSym(v.Aux)
23381 val := v_0
23382 if v_1.Op != OpAMD64ADDQconst {
23383 break
23384 }
23385 off2 := auxIntToInt32(v_1.AuxInt)
23386 base := v_1.Args[0]
23387 mem := v_2
23388 if !(is32Bit(int64(off1) + int64(off2))) {
23389 break
23390 }
23391 v.reset(OpAMD64XORLload)
23392 v.AuxInt = int32ToAuxInt(off1 + off2)
23393 v.Aux = symToAux(sym)
23394 v.AddArg3(val, base, mem)
23395 return true
23396 }
23397
23398
23399
23400 for {
23401 off1 := auxIntToInt32(v.AuxInt)
23402 sym1 := auxToSym(v.Aux)
23403 val := v_0
23404 if v_1.Op != OpAMD64LEAQ {
23405 break
23406 }
23407 off2 := auxIntToInt32(v_1.AuxInt)
23408 sym2 := auxToSym(v_1.Aux)
23409 base := v_1.Args[0]
23410 mem := v_2
23411 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23412 break
23413 }
23414 v.reset(OpAMD64XORLload)
23415 v.AuxInt = int32ToAuxInt(off1 + off2)
23416 v.Aux = symToAux(mergeSym(sym1, sym2))
23417 v.AddArg3(val, base, mem)
23418 return true
23419 }
23420
23421
23422 for {
23423 off := auxIntToInt32(v.AuxInt)
23424 sym := auxToSym(v.Aux)
23425 x := v_0
23426 ptr := v_1
23427 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23428 break
23429 }
23430 y := v_2.Args[1]
23431 if ptr != v_2.Args[0] {
23432 break
23433 }
23434 v.reset(OpAMD64XORL)
23435 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
23436 v0.AddArg(y)
23437 v.AddArg2(x, v0)
23438 return true
23439 }
23440 return false
23441 }
23442 func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
23443 v_2 := v.Args[2]
23444 v_1 := v.Args[1]
23445 v_0 := v.Args[0]
23446
23447
23448
23449 for {
23450 off1 := auxIntToInt32(v.AuxInt)
23451 sym := auxToSym(v.Aux)
23452 if v_0.Op != OpAMD64ADDQconst {
23453 break
23454 }
23455 off2 := auxIntToInt32(v_0.AuxInt)
23456 base := v_0.Args[0]
23457 val := v_1
23458 mem := v_2
23459 if !(is32Bit(int64(off1) + int64(off2))) {
23460 break
23461 }
23462 v.reset(OpAMD64XORLmodify)
23463 v.AuxInt = int32ToAuxInt(off1 + off2)
23464 v.Aux = symToAux(sym)
23465 v.AddArg3(base, val, mem)
23466 return true
23467 }
23468
23469
23470
23471 for {
23472 off1 := auxIntToInt32(v.AuxInt)
23473 sym1 := auxToSym(v.Aux)
23474 if v_0.Op != OpAMD64LEAQ {
23475 break
23476 }
23477 off2 := auxIntToInt32(v_0.AuxInt)
23478 sym2 := auxToSym(v_0.Aux)
23479 base := v_0.Args[0]
23480 val := v_1
23481 mem := v_2
23482 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23483 break
23484 }
23485 v.reset(OpAMD64XORLmodify)
23486 v.AuxInt = int32ToAuxInt(off1 + off2)
23487 v.Aux = symToAux(mergeSym(sym1, sym2))
23488 v.AddArg3(base, val, mem)
23489 return true
23490 }
23491 return false
23492 }
23493 func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
23494 v_1 := v.Args[1]
23495 v_0 := v.Args[0]
23496
23497
23498 for {
23499 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23500 if v_0.Op != OpAMD64SHLQ {
23501 continue
23502 }
23503 y := v_0.Args[1]
23504 v_0_0 := v_0.Args[0]
23505 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
23506 continue
23507 }
23508 x := v_1
23509 v.reset(OpAMD64BTCQ)
23510 v.AddArg2(x, y)
23511 return true
23512 }
23513 break
23514 }
23515
23516
23517
23518 for {
23519 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23520 if v_0.Op != OpAMD64MOVQconst {
23521 continue
23522 }
23523 c := auxIntToInt64(v_0.AuxInt)
23524 x := v_1
23525 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
23526 continue
23527 }
23528 v.reset(OpAMD64BTCQconst)
23529 v.AuxInt = int8ToAuxInt(int8(log64(c)))
23530 v.AddArg(x)
23531 return true
23532 }
23533 break
23534 }
23535
23536
23537
23538 for {
23539 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23540 x := v_0
23541 if v_1.Op != OpAMD64MOVQconst {
23542 continue
23543 }
23544 c := auxIntToInt64(v_1.AuxInt)
23545 if !(is32Bit(c)) {
23546 continue
23547 }
23548 v.reset(OpAMD64XORQconst)
23549 v.AuxInt = int32ToAuxInt(int32(c))
23550 v.AddArg(x)
23551 return true
23552 }
23553 break
23554 }
23555
23556
23557 for {
23558 x := v_0
23559 if x != v_1 {
23560 break
23561 }
23562 v.reset(OpAMD64MOVQconst)
23563 v.AuxInt = int64ToAuxInt(0)
23564 return true
23565 }
23566
23567
23568
23569 for {
23570 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23571 x := v_0
23572 l := v_1
23573 if l.Op != OpAMD64MOVQload {
23574 continue
23575 }
23576 off := auxIntToInt32(l.AuxInt)
23577 sym := auxToSym(l.Aux)
23578 mem := l.Args[1]
23579 ptr := l.Args[0]
23580 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23581 continue
23582 }
23583 v.reset(OpAMD64XORQload)
23584 v.AuxInt = int32ToAuxInt(off)
23585 v.Aux = symToAux(sym)
23586 v.AddArg3(x, ptr, mem)
23587 return true
23588 }
23589 break
23590 }
23591
23592
23593
23594 for {
23595 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23596 x := v_0
23597 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23598 continue
23599 }
23600 v.reset(OpAMD64BLSMSKQ)
23601 v.AddArg(x)
23602 return true
23603 }
23604 break
23605 }
23606 return false
23607 }
23608 func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
23609 v_0 := v.Args[0]
23610
23611
23612 for {
23613 c := auxIntToInt32(v.AuxInt)
23614 if v_0.Op != OpAMD64XORQconst {
23615 break
23616 }
23617 d := auxIntToInt32(v_0.AuxInt)
23618 x := v_0.Args[0]
23619 v.reset(OpAMD64XORQconst)
23620 v.AuxInt = int32ToAuxInt(c ^ d)
23621 v.AddArg(x)
23622 return true
23623 }
23624
23625
23626 for {
23627 if auxIntToInt32(v.AuxInt) != 0 {
23628 break
23629 }
23630 x := v_0
23631 v.copyOf(x)
23632 return true
23633 }
23634
23635
23636 for {
23637 c := auxIntToInt32(v.AuxInt)
23638 if v_0.Op != OpAMD64MOVQconst {
23639 break
23640 }
23641 d := auxIntToInt64(v_0.AuxInt)
23642 v.reset(OpAMD64MOVQconst)
23643 v.AuxInt = int64ToAuxInt(int64(c) ^ d)
23644 return true
23645 }
23646 return false
23647 }
23648 func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
23649 v_1 := v.Args[1]
23650 v_0 := v.Args[0]
23651
23652
23653
23654 for {
23655 valoff1 := auxIntToValAndOff(v.AuxInt)
23656 sym := auxToSym(v.Aux)
23657 if v_0.Op != OpAMD64ADDQconst {
23658 break
23659 }
23660 off2 := auxIntToInt32(v_0.AuxInt)
23661 base := v_0.Args[0]
23662 mem := v_1
23663 if !(ValAndOff(valoff1).canAdd32(off2)) {
23664 break
23665 }
23666 v.reset(OpAMD64XORQconstmodify)
23667 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23668 v.Aux = symToAux(sym)
23669 v.AddArg2(base, mem)
23670 return true
23671 }
23672
23673
23674
23675 for {
23676 valoff1 := auxIntToValAndOff(v.AuxInt)
23677 sym1 := auxToSym(v.Aux)
23678 if v_0.Op != OpAMD64LEAQ {
23679 break
23680 }
23681 off2 := auxIntToInt32(v_0.AuxInt)
23682 sym2 := auxToSym(v_0.Aux)
23683 base := v_0.Args[0]
23684 mem := v_1
23685 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23686 break
23687 }
23688 v.reset(OpAMD64XORQconstmodify)
23689 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23690 v.Aux = symToAux(mergeSym(sym1, sym2))
23691 v.AddArg2(base, mem)
23692 return true
23693 }
23694 return false
23695 }
23696 func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
23697 v_2 := v.Args[2]
23698 v_1 := v.Args[1]
23699 v_0 := v.Args[0]
23700 b := v.Block
23701 typ := &b.Func.Config.Types
23702
23703
23704
23705 for {
23706 off1 := auxIntToInt32(v.AuxInt)
23707 sym := auxToSym(v.Aux)
23708 val := v_0
23709 if v_1.Op != OpAMD64ADDQconst {
23710 break
23711 }
23712 off2 := auxIntToInt32(v_1.AuxInt)
23713 base := v_1.Args[0]
23714 mem := v_2
23715 if !(is32Bit(int64(off1) + int64(off2))) {
23716 break
23717 }
23718 v.reset(OpAMD64XORQload)
23719 v.AuxInt = int32ToAuxInt(off1 + off2)
23720 v.Aux = symToAux(sym)
23721 v.AddArg3(val, base, mem)
23722 return true
23723 }
23724
23725
23726
23727 for {
23728 off1 := auxIntToInt32(v.AuxInt)
23729 sym1 := auxToSym(v.Aux)
23730 val := v_0
23731 if v_1.Op != OpAMD64LEAQ {
23732 break
23733 }
23734 off2 := auxIntToInt32(v_1.AuxInt)
23735 sym2 := auxToSym(v_1.Aux)
23736 base := v_1.Args[0]
23737 mem := v_2
23738 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23739 break
23740 }
23741 v.reset(OpAMD64XORQload)
23742 v.AuxInt = int32ToAuxInt(off1 + off2)
23743 v.Aux = symToAux(mergeSym(sym1, sym2))
23744 v.AddArg3(val, base, mem)
23745 return true
23746 }
23747
23748
23749 for {
23750 off := auxIntToInt32(v.AuxInt)
23751 sym := auxToSym(v.Aux)
23752 x := v_0
23753 ptr := v_1
23754 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23755 break
23756 }
23757 y := v_2.Args[1]
23758 if ptr != v_2.Args[0] {
23759 break
23760 }
23761 v.reset(OpAMD64XORQ)
23762 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
23763 v0.AddArg(y)
23764 v.AddArg2(x, v0)
23765 return true
23766 }
23767 return false
23768 }
23769 func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
23770 v_2 := v.Args[2]
23771 v_1 := v.Args[1]
23772 v_0 := v.Args[0]
23773
23774
23775
23776 for {
23777 off1 := auxIntToInt32(v.AuxInt)
23778 sym := auxToSym(v.Aux)
23779 if v_0.Op != OpAMD64ADDQconst {
23780 break
23781 }
23782 off2 := auxIntToInt32(v_0.AuxInt)
23783 base := v_0.Args[0]
23784 val := v_1
23785 mem := v_2
23786 if !(is32Bit(int64(off1) + int64(off2))) {
23787 break
23788 }
23789 v.reset(OpAMD64XORQmodify)
23790 v.AuxInt = int32ToAuxInt(off1 + off2)
23791 v.Aux = symToAux(sym)
23792 v.AddArg3(base, val, mem)
23793 return true
23794 }
23795
23796
23797
23798 for {
23799 off1 := auxIntToInt32(v.AuxInt)
23800 sym1 := auxToSym(v.Aux)
23801 if v_0.Op != OpAMD64LEAQ {
23802 break
23803 }
23804 off2 := auxIntToInt32(v_0.AuxInt)
23805 sym2 := auxToSym(v_0.Aux)
23806 base := v_0.Args[0]
23807 val := v_1
23808 mem := v_2
23809 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23810 break
23811 }
23812 v.reset(OpAMD64XORQmodify)
23813 v.AuxInt = int32ToAuxInt(off1 + off2)
23814 v.Aux = symToAux(mergeSym(sym1, sym2))
23815 v.AddArg3(base, val, mem)
23816 return true
23817 }
23818 return false
23819 }
23820 func rewriteValueAMD64_OpAddr(v *Value) bool {
23821 v_0 := v.Args[0]
23822
23823
23824 for {
23825 sym := auxToSym(v.Aux)
23826 base := v_0
23827 v.reset(OpAMD64LEAQ)
23828 v.Aux = symToAux(sym)
23829 v.AddArg(base)
23830 return true
23831 }
23832 }
23833 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
23834 v_2 := v.Args[2]
23835 v_1 := v.Args[1]
23836 v_0 := v.Args[0]
23837 b := v.Block
23838 typ := &b.Func.Config.Types
23839
23840
23841 for {
23842 ptr := v_0
23843 val := v_1
23844 mem := v_2
23845 v.reset(OpAMD64AddTupleFirst32)
23846 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
23847 v0.AddArg3(val, ptr, mem)
23848 v.AddArg2(val, v0)
23849 return true
23850 }
23851 }
23852 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
23853 v_2 := v.Args[2]
23854 v_1 := v.Args[1]
23855 v_0 := v.Args[0]
23856 b := v.Block
23857 typ := &b.Func.Config.Types
23858
23859
23860 for {
23861 ptr := v_0
23862 val := v_1
23863 mem := v_2
23864 v.reset(OpAMD64AddTupleFirst64)
23865 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
23866 v0.AddArg3(val, ptr, mem)
23867 v.AddArg2(val, v0)
23868 return true
23869 }
23870 }
23871 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
23872 v_2 := v.Args[2]
23873 v_1 := v.Args[1]
23874 v_0 := v.Args[0]
23875
23876
23877 for {
23878 ptr := v_0
23879 val := v_1
23880 mem := v_2
23881 v.reset(OpAMD64ANDLlock)
23882 v.AddArg3(ptr, val, mem)
23883 return true
23884 }
23885 }
23886 func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool {
23887 v_2 := v.Args[2]
23888 v_1 := v.Args[1]
23889 v_0 := v.Args[0]
23890
23891
23892 for {
23893 ptr := v_0
23894 val := v_1
23895 mem := v_2
23896 v.reset(OpAMD64LoweredAtomicAnd32)
23897 v.AddArg3(ptr, val, mem)
23898 return true
23899 }
23900 }
23901 func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool {
23902 v_2 := v.Args[2]
23903 v_1 := v.Args[1]
23904 v_0 := v.Args[0]
23905
23906
23907 for {
23908 ptr := v_0
23909 val := v_1
23910 mem := v_2
23911 v.reset(OpAMD64LoweredAtomicAnd64)
23912 v.AddArg3(ptr, val, mem)
23913 return true
23914 }
23915 }
23916 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
23917 v_2 := v.Args[2]
23918 v_1 := v.Args[1]
23919 v_0 := v.Args[0]
23920
23921
23922 for {
23923 ptr := v_0
23924 val := v_1
23925 mem := v_2
23926 v.reset(OpAMD64ANDBlock)
23927 v.AddArg3(ptr, val, mem)
23928 return true
23929 }
23930 }
23931 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
23932 v_3 := v.Args[3]
23933 v_2 := v.Args[2]
23934 v_1 := v.Args[1]
23935 v_0 := v.Args[0]
23936
23937
23938 for {
23939 ptr := v_0
23940 old := v_1
23941 new_ := v_2
23942 mem := v_3
23943 v.reset(OpAMD64CMPXCHGLlock)
23944 v.AddArg4(ptr, old, new_, mem)
23945 return true
23946 }
23947 }
23948 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
23949 v_3 := v.Args[3]
23950 v_2 := v.Args[2]
23951 v_1 := v.Args[1]
23952 v_0 := v.Args[0]
23953
23954
23955 for {
23956 ptr := v_0
23957 old := v_1
23958 new_ := v_2
23959 mem := v_3
23960 v.reset(OpAMD64CMPXCHGQlock)
23961 v.AddArg4(ptr, old, new_, mem)
23962 return true
23963 }
23964 }
23965 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
23966 v_2 := v.Args[2]
23967 v_1 := v.Args[1]
23968 v_0 := v.Args[0]
23969
23970
23971 for {
23972 ptr := v_0
23973 val := v_1
23974 mem := v_2
23975 v.reset(OpAMD64XCHGL)
23976 v.AddArg3(val, ptr, mem)
23977 return true
23978 }
23979 }
23980 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
23981 v_2 := v.Args[2]
23982 v_1 := v.Args[1]
23983 v_0 := v.Args[0]
23984
23985
23986 for {
23987 ptr := v_0
23988 val := v_1
23989 mem := v_2
23990 v.reset(OpAMD64XCHGQ)
23991 v.AddArg3(val, ptr, mem)
23992 return true
23993 }
23994 }
23995 func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool {
23996 v_2 := v.Args[2]
23997 v_1 := v.Args[1]
23998 v_0 := v.Args[0]
23999
24000
24001 for {
24002 ptr := v_0
24003 val := v_1
24004 mem := v_2
24005 v.reset(OpAMD64XCHGB)
24006 v.AddArg3(val, ptr, mem)
24007 return true
24008 }
24009 }
24010 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
24011 v_1 := v.Args[1]
24012 v_0 := v.Args[0]
24013
24014
24015 for {
24016 ptr := v_0
24017 mem := v_1
24018 v.reset(OpAMD64MOVLatomicload)
24019 v.AddArg2(ptr, mem)
24020 return true
24021 }
24022 }
24023 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
24024 v_1 := v.Args[1]
24025 v_0 := v.Args[0]
24026
24027
24028 for {
24029 ptr := v_0
24030 mem := v_1
24031 v.reset(OpAMD64MOVQatomicload)
24032 v.AddArg2(ptr, mem)
24033 return true
24034 }
24035 }
24036 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
24037 v_1 := v.Args[1]
24038 v_0 := v.Args[0]
24039
24040
24041 for {
24042 ptr := v_0
24043 mem := v_1
24044 v.reset(OpAMD64MOVBatomicload)
24045 v.AddArg2(ptr, mem)
24046 return true
24047 }
24048 }
24049 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
24050 v_1 := v.Args[1]
24051 v_0 := v.Args[0]
24052
24053
24054 for {
24055 ptr := v_0
24056 mem := v_1
24057 v.reset(OpAMD64MOVQatomicload)
24058 v.AddArg2(ptr, mem)
24059 return true
24060 }
24061 }
24062 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
24063 v_2 := v.Args[2]
24064 v_1 := v.Args[1]
24065 v_0 := v.Args[0]
24066
24067
24068 for {
24069 ptr := v_0
24070 val := v_1
24071 mem := v_2
24072 v.reset(OpAMD64ORLlock)
24073 v.AddArg3(ptr, val, mem)
24074 return true
24075 }
24076 }
24077 func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool {
24078 v_2 := v.Args[2]
24079 v_1 := v.Args[1]
24080 v_0 := v.Args[0]
24081
24082
24083 for {
24084 ptr := v_0
24085 val := v_1
24086 mem := v_2
24087 v.reset(OpAMD64LoweredAtomicOr32)
24088 v.AddArg3(ptr, val, mem)
24089 return true
24090 }
24091 }
24092 func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool {
24093 v_2 := v.Args[2]
24094 v_1 := v.Args[1]
24095 v_0 := v.Args[0]
24096
24097
24098 for {
24099 ptr := v_0
24100 val := v_1
24101 mem := v_2
24102 v.reset(OpAMD64LoweredAtomicOr64)
24103 v.AddArg3(ptr, val, mem)
24104 return true
24105 }
24106 }
24107 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
24108 v_2 := v.Args[2]
24109 v_1 := v.Args[1]
24110 v_0 := v.Args[0]
24111
24112
24113 for {
24114 ptr := v_0
24115 val := v_1
24116 mem := v_2
24117 v.reset(OpAMD64ORBlock)
24118 v.AddArg3(ptr, val, mem)
24119 return true
24120 }
24121 }
24122 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
24123 v_2 := v.Args[2]
24124 v_1 := v.Args[1]
24125 v_0 := v.Args[0]
24126 b := v.Block
24127 typ := &b.Func.Config.Types
24128
24129
24130 for {
24131 ptr := v_0
24132 val := v_1
24133 mem := v_2
24134 v.reset(OpSelect1)
24135 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
24136 v0.AddArg3(val, ptr, mem)
24137 v.AddArg(v0)
24138 return true
24139 }
24140 }
24141 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
24142 v_2 := v.Args[2]
24143 v_1 := v.Args[1]
24144 v_0 := v.Args[0]
24145 b := v.Block
24146 typ := &b.Func.Config.Types
24147
24148
24149 for {
24150 ptr := v_0
24151 val := v_1
24152 mem := v_2
24153 v.reset(OpSelect1)
24154 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
24155 v0.AddArg3(val, ptr, mem)
24156 v.AddArg(v0)
24157 return true
24158 }
24159 }
24160 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
24161 v_2 := v.Args[2]
24162 v_1 := v.Args[1]
24163 v_0 := v.Args[0]
24164 b := v.Block
24165 typ := &b.Func.Config.Types
24166
24167
24168 for {
24169 ptr := v_0
24170 val := v_1
24171 mem := v_2
24172 v.reset(OpSelect1)
24173 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
24174 v0.AddArg3(val, ptr, mem)
24175 v.AddArg(v0)
24176 return true
24177 }
24178 }
24179 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
24180 v_2 := v.Args[2]
24181 v_1 := v.Args[1]
24182 v_0 := v.Args[0]
24183 b := v.Block
24184 typ := &b.Func.Config.Types
24185
24186
24187 for {
24188 ptr := v_0
24189 val := v_1
24190 mem := v_2
24191 v.reset(OpSelect1)
24192 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
24193 v0.AddArg3(val, ptr, mem)
24194 v.AddArg(v0)
24195 return true
24196 }
24197 }
24198 func rewriteValueAMD64_OpBitLen16(v *Value) bool {
24199 v_0 := v.Args[0]
24200 b := v.Block
24201 typ := &b.Func.Config.Types
24202
24203
24204
24205 for {
24206 x := v_0
24207 if !(buildcfg.GOAMD64 < 3) {
24208 break
24209 }
24210 v.reset(OpAMD64BSRL)
24211 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24212 v0.AuxInt = int32ToAuxInt(1)
24213 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
24214 v1.AddArg(x)
24215 v0.AddArg2(v1, v1)
24216 v.AddArg(v0)
24217 return true
24218 }
24219
24220
24221
24222 for {
24223 t := v.Type
24224 x := v_0
24225 if !(buildcfg.GOAMD64 >= 3) {
24226 break
24227 }
24228 v.reset(OpAMD64NEGQ)
24229 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24230 v0.AuxInt = int32ToAuxInt(-32)
24231 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24232 v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
24233 v2.AddArg(x)
24234 v1.AddArg(v2)
24235 v0.AddArg(v1)
24236 v.AddArg(v0)
24237 return true
24238 }
24239 return false
24240 }
24241 func rewriteValueAMD64_OpBitLen32(v *Value) bool {
24242 v_0 := v.Args[0]
24243 b := v.Block
24244 typ := &b.Func.Config.Types
24245
24246
24247
24248 for {
24249 x := v_0
24250 if !(buildcfg.GOAMD64 < 3) {
24251 break
24252 }
24253 v.reset(OpSelect0)
24254 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24255 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
24256 v1.AuxInt = int32ToAuxInt(1)
24257 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
24258 v2.AddArg(x)
24259 v1.AddArg2(v2, v2)
24260 v0.AddArg(v1)
24261 v.AddArg(v0)
24262 return true
24263 }
24264
24265
24266
24267 for {
24268 t := v.Type
24269 x := v_0
24270 if !(buildcfg.GOAMD64 >= 3) {
24271 break
24272 }
24273 v.reset(OpAMD64NEGQ)
24274 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24275 v0.AuxInt = int32ToAuxInt(-32)
24276 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24277 v1.AddArg(x)
24278 v0.AddArg(v1)
24279 v.AddArg(v0)
24280 return true
24281 }
24282 return false
24283 }
24284 func rewriteValueAMD64_OpBitLen64(v *Value) bool {
24285 v_0 := v.Args[0]
24286 b := v.Block
24287 typ := &b.Func.Config.Types
24288
24289
24290
24291 for {
24292 t := v.Type
24293 x := v_0
24294 if !(buildcfg.GOAMD64 < 3) {
24295 break
24296 }
24297 v.reset(OpAMD64ADDQconst)
24298 v.AuxInt = int32ToAuxInt(1)
24299 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
24300 v1 := b.NewValue0(v.Pos, OpSelect0, t)
24301 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24302 v2.AddArg(x)
24303 v1.AddArg(v2)
24304 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
24305 v3.AuxInt = int64ToAuxInt(-1)
24306 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
24307 v4.AddArg(v2)
24308 v0.AddArg3(v1, v3, v4)
24309 v.AddArg(v0)
24310 return true
24311 }
24312
24313
24314
24315 for {
24316 t := v.Type
24317 x := v_0
24318 if !(buildcfg.GOAMD64 >= 3) {
24319 break
24320 }
24321 v.reset(OpAMD64NEGQ)
24322 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24323 v0.AuxInt = int32ToAuxInt(-64)
24324 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
24325 v1.AddArg(x)
24326 v0.AddArg(v1)
24327 v.AddArg(v0)
24328 return true
24329 }
24330 return false
24331 }
24332 func rewriteValueAMD64_OpBitLen8(v *Value) bool {
24333 v_0 := v.Args[0]
24334 b := v.Block
24335 typ := &b.Func.Config.Types
24336
24337
24338
24339 for {
24340 x := v_0
24341 if !(buildcfg.GOAMD64 < 3) {
24342 break
24343 }
24344 v.reset(OpAMD64BSRL)
24345 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24346 v0.AuxInt = int32ToAuxInt(1)
24347 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
24348 v1.AddArg(x)
24349 v0.AddArg2(v1, v1)
24350 v.AddArg(v0)
24351 return true
24352 }
24353
24354
24355
24356 for {
24357 t := v.Type
24358 x := v_0
24359 if !(buildcfg.GOAMD64 >= 3) {
24360 break
24361 }
24362 v.reset(OpAMD64NEGQ)
24363 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24364 v0.AuxInt = int32ToAuxInt(-32)
24365 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24366 v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
24367 v2.AddArg(x)
24368 v1.AddArg(v2)
24369 v0.AddArg(v1)
24370 v.AddArg(v0)
24371 return true
24372 }
24373 return false
24374 }
24375 func rewriteValueAMD64_OpBswap16(v *Value) bool {
24376 v_0 := v.Args[0]
24377
24378
24379 for {
24380 x := v_0
24381 v.reset(OpAMD64ROLWconst)
24382 v.AuxInt = int8ToAuxInt(8)
24383 v.AddArg(x)
24384 return true
24385 }
24386 }
24387 func rewriteValueAMD64_OpCeil(v *Value) bool {
24388 v_0 := v.Args[0]
24389
24390
24391 for {
24392 x := v_0
24393 v.reset(OpAMD64ROUNDSD)
24394 v.AuxInt = int8ToAuxInt(2)
24395 v.AddArg(x)
24396 return true
24397 }
24398 }
24399 func rewriteValueAMD64_OpCondSelect(v *Value) bool {
24400 v_2 := v.Args[2]
24401 v_1 := v.Args[1]
24402 v_0 := v.Args[0]
24403 b := v.Block
24404 typ := &b.Func.Config.Types
24405
24406
24407
24408 for {
24409 t := v.Type
24410 x := v_0
24411 y := v_1
24412 if v_2.Op != OpAMD64SETEQ {
24413 break
24414 }
24415 cond := v_2.Args[0]
24416 if !(is64BitInt(t) || isPtr(t)) {
24417 break
24418 }
24419 v.reset(OpAMD64CMOVQEQ)
24420 v.AddArg3(y, x, cond)
24421 return true
24422 }
24423
24424
24425
24426 for {
24427 t := v.Type
24428 x := v_0
24429 y := v_1
24430 if v_2.Op != OpAMD64SETNE {
24431 break
24432 }
24433 cond := v_2.Args[0]
24434 if !(is64BitInt(t) || isPtr(t)) {
24435 break
24436 }
24437 v.reset(OpAMD64CMOVQNE)
24438 v.AddArg3(y, x, cond)
24439 return true
24440 }
24441
24442
24443
24444 for {
24445 t := v.Type
24446 x := v_0
24447 y := v_1
24448 if v_2.Op != OpAMD64SETL {
24449 break
24450 }
24451 cond := v_2.Args[0]
24452 if !(is64BitInt(t) || isPtr(t)) {
24453 break
24454 }
24455 v.reset(OpAMD64CMOVQLT)
24456 v.AddArg3(y, x, cond)
24457 return true
24458 }
24459
24460
24461
24462 for {
24463 t := v.Type
24464 x := v_0
24465 y := v_1
24466 if v_2.Op != OpAMD64SETG {
24467 break
24468 }
24469 cond := v_2.Args[0]
24470 if !(is64BitInt(t) || isPtr(t)) {
24471 break
24472 }
24473 v.reset(OpAMD64CMOVQGT)
24474 v.AddArg3(y, x, cond)
24475 return true
24476 }
24477
24478
24479
24480 for {
24481 t := v.Type
24482 x := v_0
24483 y := v_1
24484 if v_2.Op != OpAMD64SETLE {
24485 break
24486 }
24487 cond := v_2.Args[0]
24488 if !(is64BitInt(t) || isPtr(t)) {
24489 break
24490 }
24491 v.reset(OpAMD64CMOVQLE)
24492 v.AddArg3(y, x, cond)
24493 return true
24494 }
24495
24496
24497
24498 for {
24499 t := v.Type
24500 x := v_0
24501 y := v_1
24502 if v_2.Op != OpAMD64SETGE {
24503 break
24504 }
24505 cond := v_2.Args[0]
24506 if !(is64BitInt(t) || isPtr(t)) {
24507 break
24508 }
24509 v.reset(OpAMD64CMOVQGE)
24510 v.AddArg3(y, x, cond)
24511 return true
24512 }
24513
24514
24515
24516 for {
24517 t := v.Type
24518 x := v_0
24519 y := v_1
24520 if v_2.Op != OpAMD64SETA {
24521 break
24522 }
24523 cond := v_2.Args[0]
24524 if !(is64BitInt(t) || isPtr(t)) {
24525 break
24526 }
24527 v.reset(OpAMD64CMOVQHI)
24528 v.AddArg3(y, x, cond)
24529 return true
24530 }
24531
24532
24533
24534 for {
24535 t := v.Type
24536 x := v_0
24537 y := v_1
24538 if v_2.Op != OpAMD64SETB {
24539 break
24540 }
24541 cond := v_2.Args[0]
24542 if !(is64BitInt(t) || isPtr(t)) {
24543 break
24544 }
24545 v.reset(OpAMD64CMOVQCS)
24546 v.AddArg3(y, x, cond)
24547 return true
24548 }
24549
24550
24551
24552 for {
24553 t := v.Type
24554 x := v_0
24555 y := v_1
24556 if v_2.Op != OpAMD64SETAE {
24557 break
24558 }
24559 cond := v_2.Args[0]
24560 if !(is64BitInt(t) || isPtr(t)) {
24561 break
24562 }
24563 v.reset(OpAMD64CMOVQCC)
24564 v.AddArg3(y, x, cond)
24565 return true
24566 }
24567
24568
24569
24570 for {
24571 t := v.Type
24572 x := v_0
24573 y := v_1
24574 if v_2.Op != OpAMD64SETBE {
24575 break
24576 }
24577 cond := v_2.Args[0]
24578 if !(is64BitInt(t) || isPtr(t)) {
24579 break
24580 }
24581 v.reset(OpAMD64CMOVQLS)
24582 v.AddArg3(y, x, cond)
24583 return true
24584 }
24585
24586
24587
24588 for {
24589 t := v.Type
24590 x := v_0
24591 y := v_1
24592 if v_2.Op != OpAMD64SETEQF {
24593 break
24594 }
24595 cond := v_2.Args[0]
24596 if !(is64BitInt(t) || isPtr(t)) {
24597 break
24598 }
24599 v.reset(OpAMD64CMOVQEQF)
24600 v.AddArg3(y, x, cond)
24601 return true
24602 }
24603
24604
24605
24606 for {
24607 t := v.Type
24608 x := v_0
24609 y := v_1
24610 if v_2.Op != OpAMD64SETNEF {
24611 break
24612 }
24613 cond := v_2.Args[0]
24614 if !(is64BitInt(t) || isPtr(t)) {
24615 break
24616 }
24617 v.reset(OpAMD64CMOVQNEF)
24618 v.AddArg3(y, x, cond)
24619 return true
24620 }
24621
24622
24623
24624 for {
24625 t := v.Type
24626 x := v_0
24627 y := v_1
24628 if v_2.Op != OpAMD64SETGF {
24629 break
24630 }
24631 cond := v_2.Args[0]
24632 if !(is64BitInt(t) || isPtr(t)) {
24633 break
24634 }
24635 v.reset(OpAMD64CMOVQGTF)
24636 v.AddArg3(y, x, cond)
24637 return true
24638 }
24639
24640
24641
24642 for {
24643 t := v.Type
24644 x := v_0
24645 y := v_1
24646 if v_2.Op != OpAMD64SETGEF {
24647 break
24648 }
24649 cond := v_2.Args[0]
24650 if !(is64BitInt(t) || isPtr(t)) {
24651 break
24652 }
24653 v.reset(OpAMD64CMOVQGEF)
24654 v.AddArg3(y, x, cond)
24655 return true
24656 }
24657
24658
24659
24660 for {
24661 t := v.Type
24662 x := v_0
24663 y := v_1
24664 if v_2.Op != OpAMD64SETEQ {
24665 break
24666 }
24667 cond := v_2.Args[0]
24668 if !(is32BitInt(t)) {
24669 break
24670 }
24671 v.reset(OpAMD64CMOVLEQ)
24672 v.AddArg3(y, x, cond)
24673 return true
24674 }
24675
24676
24677
24678 for {
24679 t := v.Type
24680 x := v_0
24681 y := v_1
24682 if v_2.Op != OpAMD64SETNE {
24683 break
24684 }
24685 cond := v_2.Args[0]
24686 if !(is32BitInt(t)) {
24687 break
24688 }
24689 v.reset(OpAMD64CMOVLNE)
24690 v.AddArg3(y, x, cond)
24691 return true
24692 }
24693
24694
24695
24696 for {
24697 t := v.Type
24698 x := v_0
24699 y := v_1
24700 if v_2.Op != OpAMD64SETL {
24701 break
24702 }
24703 cond := v_2.Args[0]
24704 if !(is32BitInt(t)) {
24705 break
24706 }
24707 v.reset(OpAMD64CMOVLLT)
24708 v.AddArg3(y, x, cond)
24709 return true
24710 }
24711
24712
24713
24714 for {
24715 t := v.Type
24716 x := v_0
24717 y := v_1
24718 if v_2.Op != OpAMD64SETG {
24719 break
24720 }
24721 cond := v_2.Args[0]
24722 if !(is32BitInt(t)) {
24723 break
24724 }
24725 v.reset(OpAMD64CMOVLGT)
24726 v.AddArg3(y, x, cond)
24727 return true
24728 }
24729
24730
24731
24732 for {
24733 t := v.Type
24734 x := v_0
24735 y := v_1
24736 if v_2.Op != OpAMD64SETLE {
24737 break
24738 }
24739 cond := v_2.Args[0]
24740 if !(is32BitInt(t)) {
24741 break
24742 }
24743 v.reset(OpAMD64CMOVLLE)
24744 v.AddArg3(y, x, cond)
24745 return true
24746 }
24747
24748
24749
24750 for {
24751 t := v.Type
24752 x := v_0
24753 y := v_1
24754 if v_2.Op != OpAMD64SETGE {
24755 break
24756 }
24757 cond := v_2.Args[0]
24758 if !(is32BitInt(t)) {
24759 break
24760 }
24761 v.reset(OpAMD64CMOVLGE)
24762 v.AddArg3(y, x, cond)
24763 return true
24764 }
24765
24766
24767
24768 for {
24769 t := v.Type
24770 x := v_0
24771 y := v_1
24772 if v_2.Op != OpAMD64SETA {
24773 break
24774 }
24775 cond := v_2.Args[0]
24776 if !(is32BitInt(t)) {
24777 break
24778 }
24779 v.reset(OpAMD64CMOVLHI)
24780 v.AddArg3(y, x, cond)
24781 return true
24782 }
24783
24784
24785
24786 for {
24787 t := v.Type
24788 x := v_0
24789 y := v_1
24790 if v_2.Op != OpAMD64SETB {
24791 break
24792 }
24793 cond := v_2.Args[0]
24794 if !(is32BitInt(t)) {
24795 break
24796 }
24797 v.reset(OpAMD64CMOVLCS)
24798 v.AddArg3(y, x, cond)
24799 return true
24800 }
24801
24802
24803
24804 for {
24805 t := v.Type
24806 x := v_0
24807 y := v_1
24808 if v_2.Op != OpAMD64SETAE {
24809 break
24810 }
24811 cond := v_2.Args[0]
24812 if !(is32BitInt(t)) {
24813 break
24814 }
24815 v.reset(OpAMD64CMOVLCC)
24816 v.AddArg3(y, x, cond)
24817 return true
24818 }
24819
24820
24821
24822 for {
24823 t := v.Type
24824 x := v_0
24825 y := v_1
24826 if v_2.Op != OpAMD64SETBE {
24827 break
24828 }
24829 cond := v_2.Args[0]
24830 if !(is32BitInt(t)) {
24831 break
24832 }
24833 v.reset(OpAMD64CMOVLLS)
24834 v.AddArg3(y, x, cond)
24835 return true
24836 }
24837
24838
24839
24840 for {
24841 t := v.Type
24842 x := v_0
24843 y := v_1
24844 if v_2.Op != OpAMD64SETEQF {
24845 break
24846 }
24847 cond := v_2.Args[0]
24848 if !(is32BitInt(t)) {
24849 break
24850 }
24851 v.reset(OpAMD64CMOVLEQF)
24852 v.AddArg3(y, x, cond)
24853 return true
24854 }
24855
24856
24857
24858 for {
24859 t := v.Type
24860 x := v_0
24861 y := v_1
24862 if v_2.Op != OpAMD64SETNEF {
24863 break
24864 }
24865 cond := v_2.Args[0]
24866 if !(is32BitInt(t)) {
24867 break
24868 }
24869 v.reset(OpAMD64CMOVLNEF)
24870 v.AddArg3(y, x, cond)
24871 return true
24872 }
24873
24874
24875
24876 for {
24877 t := v.Type
24878 x := v_0
24879 y := v_1
24880 if v_2.Op != OpAMD64SETGF {
24881 break
24882 }
24883 cond := v_2.Args[0]
24884 if !(is32BitInt(t)) {
24885 break
24886 }
24887 v.reset(OpAMD64CMOVLGTF)
24888 v.AddArg3(y, x, cond)
24889 return true
24890 }
24891
24892
24893
24894 for {
24895 t := v.Type
24896 x := v_0
24897 y := v_1
24898 if v_2.Op != OpAMD64SETGEF {
24899 break
24900 }
24901 cond := v_2.Args[0]
24902 if !(is32BitInt(t)) {
24903 break
24904 }
24905 v.reset(OpAMD64CMOVLGEF)
24906 v.AddArg3(y, x, cond)
24907 return true
24908 }
24909
24910
24911
24912 for {
24913 t := v.Type
24914 x := v_0
24915 y := v_1
24916 if v_2.Op != OpAMD64SETEQ {
24917 break
24918 }
24919 cond := v_2.Args[0]
24920 if !(is16BitInt(t)) {
24921 break
24922 }
24923 v.reset(OpAMD64CMOVWEQ)
24924 v.AddArg3(y, x, cond)
24925 return true
24926 }
24927
24928
24929
24930 for {
24931 t := v.Type
24932 x := v_0
24933 y := v_1
24934 if v_2.Op != OpAMD64SETNE {
24935 break
24936 }
24937 cond := v_2.Args[0]
24938 if !(is16BitInt(t)) {
24939 break
24940 }
24941 v.reset(OpAMD64CMOVWNE)
24942 v.AddArg3(y, x, cond)
24943 return true
24944 }
24945
24946
24947
24948 for {
24949 t := v.Type
24950 x := v_0
24951 y := v_1
24952 if v_2.Op != OpAMD64SETL {
24953 break
24954 }
24955 cond := v_2.Args[0]
24956 if !(is16BitInt(t)) {
24957 break
24958 }
24959 v.reset(OpAMD64CMOVWLT)
24960 v.AddArg3(y, x, cond)
24961 return true
24962 }
24963
24964
24965
24966 for {
24967 t := v.Type
24968 x := v_0
24969 y := v_1
24970 if v_2.Op != OpAMD64SETG {
24971 break
24972 }
24973 cond := v_2.Args[0]
24974 if !(is16BitInt(t)) {
24975 break
24976 }
24977 v.reset(OpAMD64CMOVWGT)
24978 v.AddArg3(y, x, cond)
24979 return true
24980 }
24981
24982
24983
24984 for {
24985 t := v.Type
24986 x := v_0
24987 y := v_1
24988 if v_2.Op != OpAMD64SETLE {
24989 break
24990 }
24991 cond := v_2.Args[0]
24992 if !(is16BitInt(t)) {
24993 break
24994 }
24995 v.reset(OpAMD64CMOVWLE)
24996 v.AddArg3(y, x, cond)
24997 return true
24998 }
24999
25000
25001
25002 for {
25003 t := v.Type
25004 x := v_0
25005 y := v_1
25006 if v_2.Op != OpAMD64SETGE {
25007 break
25008 }
25009 cond := v_2.Args[0]
25010 if !(is16BitInt(t)) {
25011 break
25012 }
25013 v.reset(OpAMD64CMOVWGE)
25014 v.AddArg3(y, x, cond)
25015 return true
25016 }
25017
25018
25019
25020 for {
25021 t := v.Type
25022 x := v_0
25023 y := v_1
25024 if v_2.Op != OpAMD64SETA {
25025 break
25026 }
25027 cond := v_2.Args[0]
25028 if !(is16BitInt(t)) {
25029 break
25030 }
25031 v.reset(OpAMD64CMOVWHI)
25032 v.AddArg3(y, x, cond)
25033 return true
25034 }
25035
25036
25037
25038 for {
25039 t := v.Type
25040 x := v_0
25041 y := v_1
25042 if v_2.Op != OpAMD64SETB {
25043 break
25044 }
25045 cond := v_2.Args[0]
25046 if !(is16BitInt(t)) {
25047 break
25048 }
25049 v.reset(OpAMD64CMOVWCS)
25050 v.AddArg3(y, x, cond)
25051 return true
25052 }
25053
25054
25055
25056 for {
25057 t := v.Type
25058 x := v_0
25059 y := v_1
25060 if v_2.Op != OpAMD64SETAE {
25061 break
25062 }
25063 cond := v_2.Args[0]
25064 if !(is16BitInt(t)) {
25065 break
25066 }
25067 v.reset(OpAMD64CMOVWCC)
25068 v.AddArg3(y, x, cond)
25069 return true
25070 }
25071
25072
25073
25074 for {
25075 t := v.Type
25076 x := v_0
25077 y := v_1
25078 if v_2.Op != OpAMD64SETBE {
25079 break
25080 }
25081 cond := v_2.Args[0]
25082 if !(is16BitInt(t)) {
25083 break
25084 }
25085 v.reset(OpAMD64CMOVWLS)
25086 v.AddArg3(y, x, cond)
25087 return true
25088 }
25089
25090
25091
25092 for {
25093 t := v.Type
25094 x := v_0
25095 y := v_1
25096 if v_2.Op != OpAMD64SETEQF {
25097 break
25098 }
25099 cond := v_2.Args[0]
25100 if !(is16BitInt(t)) {
25101 break
25102 }
25103 v.reset(OpAMD64CMOVWEQF)
25104 v.AddArg3(y, x, cond)
25105 return true
25106 }
25107
25108
25109
25110 for {
25111 t := v.Type
25112 x := v_0
25113 y := v_1
25114 if v_2.Op != OpAMD64SETNEF {
25115 break
25116 }
25117 cond := v_2.Args[0]
25118 if !(is16BitInt(t)) {
25119 break
25120 }
25121 v.reset(OpAMD64CMOVWNEF)
25122 v.AddArg3(y, x, cond)
25123 return true
25124 }
25125
25126
25127
25128 for {
25129 t := v.Type
25130 x := v_0
25131 y := v_1
25132 if v_2.Op != OpAMD64SETGF {
25133 break
25134 }
25135 cond := v_2.Args[0]
25136 if !(is16BitInt(t)) {
25137 break
25138 }
25139 v.reset(OpAMD64CMOVWGTF)
25140 v.AddArg3(y, x, cond)
25141 return true
25142 }
25143
25144
25145
25146 for {
25147 t := v.Type
25148 x := v_0
25149 y := v_1
25150 if v_2.Op != OpAMD64SETGEF {
25151 break
25152 }
25153 cond := v_2.Args[0]
25154 if !(is16BitInt(t)) {
25155 break
25156 }
25157 v.reset(OpAMD64CMOVWGEF)
25158 v.AddArg3(y, x, cond)
25159 return true
25160 }
25161
25162
25163
25164 for {
25165 t := v.Type
25166 x := v_0
25167 y := v_1
25168 check := v_2
25169 if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
25170 break
25171 }
25172 v.reset(OpCondSelect)
25173 v.Type = t
25174 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
25175 v0.AddArg(check)
25176 v.AddArg3(x, y, v0)
25177 return true
25178 }
25179
25180
25181
25182 for {
25183 t := v.Type
25184 x := v_0
25185 y := v_1
25186 check := v_2
25187 if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
25188 break
25189 }
25190 v.reset(OpCondSelect)
25191 v.Type = t
25192 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
25193 v0.AddArg(check)
25194 v.AddArg3(x, y, v0)
25195 return true
25196 }
25197
25198
25199
25200 for {
25201 t := v.Type
25202 x := v_0
25203 y := v_1
25204 check := v_2
25205 if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
25206 break
25207 }
25208 v.reset(OpCondSelect)
25209 v.Type = t
25210 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
25211 v0.AddArg(check)
25212 v.AddArg3(x, y, v0)
25213 return true
25214 }
25215
25216
25217
25218 for {
25219 t := v.Type
25220 x := v_0
25221 y := v_1
25222 check := v_2
25223 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
25224 break
25225 }
25226 v.reset(OpAMD64CMOVQNE)
25227 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25228 v0.AuxInt = int32ToAuxInt(0)
25229 v0.AddArg(check)
25230 v.AddArg3(y, x, v0)
25231 return true
25232 }
25233
25234
25235
25236 for {
25237 t := v.Type
25238 x := v_0
25239 y := v_1
25240 check := v_2
25241 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
25242 break
25243 }
25244 v.reset(OpAMD64CMOVLNE)
25245 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25246 v0.AuxInt = int32ToAuxInt(0)
25247 v0.AddArg(check)
25248 v.AddArg3(y, x, v0)
25249 return true
25250 }
25251
25252
25253
25254 for {
25255 t := v.Type
25256 x := v_0
25257 y := v_1
25258 check := v_2
25259 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
25260 break
25261 }
25262 v.reset(OpAMD64CMOVWNE)
25263 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25264 v0.AuxInt = int32ToAuxInt(0)
25265 v0.AddArg(check)
25266 v.AddArg3(y, x, v0)
25267 return true
25268 }
25269 return false
25270 }
25271 func rewriteValueAMD64_OpConst16(v *Value) bool {
25272
25273
25274 for {
25275 c := auxIntToInt16(v.AuxInt)
25276 v.reset(OpAMD64MOVLconst)
25277 v.AuxInt = int32ToAuxInt(int32(c))
25278 return true
25279 }
25280 }
25281 func rewriteValueAMD64_OpConst8(v *Value) bool {
25282
25283
25284 for {
25285 c := auxIntToInt8(v.AuxInt)
25286 v.reset(OpAMD64MOVLconst)
25287 v.AuxInt = int32ToAuxInt(int32(c))
25288 return true
25289 }
25290 }
25291 func rewriteValueAMD64_OpConstBool(v *Value) bool {
25292
25293
25294 for {
25295 c := auxIntToBool(v.AuxInt)
25296 v.reset(OpAMD64MOVLconst)
25297 v.AuxInt = int32ToAuxInt(b2i32(c))
25298 return true
25299 }
25300 }
25301 func rewriteValueAMD64_OpConstNil(v *Value) bool {
25302
25303
25304 for {
25305 v.reset(OpAMD64MOVQconst)
25306 v.AuxInt = int64ToAuxInt(0)
25307 return true
25308 }
25309 }
25310 func rewriteValueAMD64_OpCtz16(v *Value) bool {
25311 v_0 := v.Args[0]
25312 b := v.Block
25313 typ := &b.Func.Config.Types
25314
25315
25316 for {
25317 x := v_0
25318 v.reset(OpAMD64BSFL)
25319 v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
25320 v0.AuxInt = int32ToAuxInt(1 << 16)
25321 v0.AddArg(x)
25322 v.AddArg(v0)
25323 return true
25324 }
25325 }
25326 func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
25327 v_0 := v.Args[0]
25328
25329
25330
25331 for {
25332 x := v_0
25333 if !(buildcfg.GOAMD64 >= 3) {
25334 break
25335 }
25336 v.reset(OpAMD64TZCNTL)
25337 v.AddArg(x)
25338 return true
25339 }
25340
25341
25342
25343 for {
25344 x := v_0
25345 if !(buildcfg.GOAMD64 < 3) {
25346 break
25347 }
25348 v.reset(OpAMD64BSFL)
25349 v.AddArg(x)
25350 return true
25351 }
25352 return false
25353 }
25354 func rewriteValueAMD64_OpCtz32(v *Value) bool {
25355 v_0 := v.Args[0]
25356 b := v.Block
25357 typ := &b.Func.Config.Types
25358
25359
25360
25361 for {
25362 x := v_0
25363 if !(buildcfg.GOAMD64 >= 3) {
25364 break
25365 }
25366 v.reset(OpAMD64TZCNTL)
25367 v.AddArg(x)
25368 return true
25369 }
25370
25371
25372
25373 for {
25374 x := v_0
25375 if !(buildcfg.GOAMD64 < 3) {
25376 break
25377 }
25378 v.reset(OpSelect0)
25379 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25380 v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
25381 v1.AuxInt = int8ToAuxInt(32)
25382 v1.AddArg(x)
25383 v0.AddArg(v1)
25384 v.AddArg(v0)
25385 return true
25386 }
25387 return false
25388 }
25389 func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
25390 v_0 := v.Args[0]
25391
25392
25393
25394 for {
25395 x := v_0
25396 if !(buildcfg.GOAMD64 >= 3) {
25397 break
25398 }
25399 v.reset(OpAMD64TZCNTL)
25400 v.AddArg(x)
25401 return true
25402 }
25403
25404
25405
25406 for {
25407 x := v_0
25408 if !(buildcfg.GOAMD64 < 3) {
25409 break
25410 }
25411 v.reset(OpAMD64BSFL)
25412 v.AddArg(x)
25413 return true
25414 }
25415 return false
25416 }
25417 func rewriteValueAMD64_OpCtz64(v *Value) bool {
25418 v_0 := v.Args[0]
25419 b := v.Block
25420 typ := &b.Func.Config.Types
25421
25422
25423
25424 for {
25425 x := v_0
25426 if !(buildcfg.GOAMD64 >= 3) {
25427 break
25428 }
25429 v.reset(OpAMD64TZCNTQ)
25430 v.AddArg(x)
25431 return true
25432 }
25433
25434
25435
25436 for {
25437 t := v.Type
25438 x := v_0
25439 if !(buildcfg.GOAMD64 < 3) {
25440 break
25441 }
25442 v.reset(OpAMD64CMOVQEQ)
25443 v0 := b.NewValue0(v.Pos, OpSelect0, t)
25444 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25445 v1.AddArg(x)
25446 v0.AddArg(v1)
25447 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
25448 v2.AuxInt = int64ToAuxInt(64)
25449 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
25450 v3.AddArg(v1)
25451 v.AddArg3(v0, v2, v3)
25452 return true
25453 }
25454 return false
25455 }
25456 func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
25457 v_0 := v.Args[0]
25458 b := v.Block
25459 typ := &b.Func.Config.Types
25460
25461
25462
25463 for {
25464 x := v_0
25465 if !(buildcfg.GOAMD64 >= 3) {
25466 break
25467 }
25468 v.reset(OpAMD64TZCNTQ)
25469 v.AddArg(x)
25470 return true
25471 }
25472
25473
25474
25475 for {
25476 x := v_0
25477 if !(buildcfg.GOAMD64 < 3) {
25478 break
25479 }
25480 v.reset(OpSelect0)
25481 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25482 v0.AddArg(x)
25483 v.AddArg(v0)
25484 return true
25485 }
25486 return false
25487 }
25488 func rewriteValueAMD64_OpCtz8(v *Value) bool {
25489 v_0 := v.Args[0]
25490 b := v.Block
25491 typ := &b.Func.Config.Types
25492
25493
25494 for {
25495 x := v_0
25496 v.reset(OpAMD64BSFL)
25497 v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
25498 v0.AuxInt = int32ToAuxInt(1 << 8)
25499 v0.AddArg(x)
25500 v.AddArg(v0)
25501 return true
25502 }
25503 }
25504 func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
25505 v_0 := v.Args[0]
25506
25507
25508
25509 for {
25510 x := v_0
25511 if !(buildcfg.GOAMD64 >= 3) {
25512 break
25513 }
25514 v.reset(OpAMD64TZCNTL)
25515 v.AddArg(x)
25516 return true
25517 }
25518
25519
25520
25521 for {
25522 x := v_0
25523 if !(buildcfg.GOAMD64 < 3) {
25524 break
25525 }
25526 v.reset(OpAMD64BSFL)
25527 v.AddArg(x)
25528 return true
25529 }
25530 return false
25531 }
25532 func rewriteValueAMD64_OpDiv16(v *Value) bool {
25533 v_1 := v.Args[1]
25534 v_0 := v.Args[0]
25535 b := v.Block
25536 typ := &b.Func.Config.Types
25537
25538
25539 for {
25540 a := auxIntToBool(v.AuxInt)
25541 x := v_0
25542 y := v_1
25543 v.reset(OpSelect0)
25544 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
25545 v0.AuxInt = boolToAuxInt(a)
25546 v0.AddArg2(x, y)
25547 v.AddArg(v0)
25548 return true
25549 }
25550 }
25551 func rewriteValueAMD64_OpDiv16u(v *Value) bool {
25552 v_1 := v.Args[1]
25553 v_0 := v.Args[0]
25554 b := v.Block
25555 typ := &b.Func.Config.Types
25556
25557
25558 for {
25559 x := v_0
25560 y := v_1
25561 v.reset(OpSelect0)
25562 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
25563 v0.AddArg2(x, y)
25564 v.AddArg(v0)
25565 return true
25566 }
25567 }
25568 func rewriteValueAMD64_OpDiv32(v *Value) bool {
25569 v_1 := v.Args[1]
25570 v_0 := v.Args[0]
25571 b := v.Block
25572 typ := &b.Func.Config.Types
25573
25574
25575 for {
25576 a := auxIntToBool(v.AuxInt)
25577 x := v_0
25578 y := v_1
25579 v.reset(OpSelect0)
25580 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
25581 v0.AuxInt = boolToAuxInt(a)
25582 v0.AddArg2(x, y)
25583 v.AddArg(v0)
25584 return true
25585 }
25586 }
25587 func rewriteValueAMD64_OpDiv32u(v *Value) bool {
25588 v_1 := v.Args[1]
25589 v_0 := v.Args[0]
25590 b := v.Block
25591 typ := &b.Func.Config.Types
25592
25593
25594 for {
25595 x := v_0
25596 y := v_1
25597 v.reset(OpSelect0)
25598 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
25599 v0.AddArg2(x, y)
25600 v.AddArg(v0)
25601 return true
25602 }
25603 }
25604 func rewriteValueAMD64_OpDiv64(v *Value) bool {
25605 v_1 := v.Args[1]
25606 v_0 := v.Args[0]
25607 b := v.Block
25608 typ := &b.Func.Config.Types
25609
25610
25611 for {
25612 a := auxIntToBool(v.AuxInt)
25613 x := v_0
25614 y := v_1
25615 v.reset(OpSelect0)
25616 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
25617 v0.AuxInt = boolToAuxInt(a)
25618 v0.AddArg2(x, y)
25619 v.AddArg(v0)
25620 return true
25621 }
25622 }
25623 func rewriteValueAMD64_OpDiv64u(v *Value) bool {
25624 v_1 := v.Args[1]
25625 v_0 := v.Args[0]
25626 b := v.Block
25627 typ := &b.Func.Config.Types
25628
25629
25630 for {
25631 x := v_0
25632 y := v_1
25633 v.reset(OpSelect0)
25634 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
25635 v0.AddArg2(x, y)
25636 v.AddArg(v0)
25637 return true
25638 }
25639 }
25640 func rewriteValueAMD64_OpDiv8(v *Value) bool {
25641 v_1 := v.Args[1]
25642 v_0 := v.Args[0]
25643 b := v.Block
25644 typ := &b.Func.Config.Types
25645
25646
25647 for {
25648 x := v_0
25649 y := v_1
25650 v.reset(OpSelect0)
25651 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
25652 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
25653 v1.AddArg(x)
25654 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
25655 v2.AddArg(y)
25656 v0.AddArg2(v1, v2)
25657 v.AddArg(v0)
25658 return true
25659 }
25660 }
25661 func rewriteValueAMD64_OpDiv8u(v *Value) bool {
25662 v_1 := v.Args[1]
25663 v_0 := v.Args[0]
25664 b := v.Block
25665 typ := &b.Func.Config.Types
25666
25667
25668 for {
25669 x := v_0
25670 y := v_1
25671 v.reset(OpSelect0)
25672 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
25673 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
25674 v1.AddArg(x)
25675 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
25676 v2.AddArg(y)
25677 v0.AddArg2(v1, v2)
25678 v.AddArg(v0)
25679 return true
25680 }
25681 }
25682 func rewriteValueAMD64_OpEq16(v *Value) bool {
25683 v_1 := v.Args[1]
25684 v_0 := v.Args[0]
25685 b := v.Block
25686
25687
25688 for {
25689 x := v_0
25690 y := v_1
25691 v.reset(OpAMD64SETEQ)
25692 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25693 v0.AddArg2(x, y)
25694 v.AddArg(v0)
25695 return true
25696 }
25697 }
25698 func rewriteValueAMD64_OpEq32(v *Value) bool {
25699 v_1 := v.Args[1]
25700 v_0 := v.Args[0]
25701 b := v.Block
25702
25703
25704 for {
25705 x := v_0
25706 y := v_1
25707 v.reset(OpAMD64SETEQ)
25708 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25709 v0.AddArg2(x, y)
25710 v.AddArg(v0)
25711 return true
25712 }
25713 }
25714 func rewriteValueAMD64_OpEq32F(v *Value) bool {
25715 v_1 := v.Args[1]
25716 v_0 := v.Args[0]
25717 b := v.Block
25718
25719
25720 for {
25721 x := v_0
25722 y := v_1
25723 v.reset(OpAMD64SETEQF)
25724 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
25725 v0.AddArg2(x, y)
25726 v.AddArg(v0)
25727 return true
25728 }
25729 }
25730 func rewriteValueAMD64_OpEq64(v *Value) bool {
25731 v_1 := v.Args[1]
25732 v_0 := v.Args[0]
25733 b := v.Block
25734
25735
25736 for {
25737 x := v_0
25738 y := v_1
25739 v.reset(OpAMD64SETEQ)
25740 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25741 v0.AddArg2(x, y)
25742 v.AddArg(v0)
25743 return true
25744 }
25745 }
25746 func rewriteValueAMD64_OpEq64F(v *Value) bool {
25747 v_1 := v.Args[1]
25748 v_0 := v.Args[0]
25749 b := v.Block
25750
25751
25752 for {
25753 x := v_0
25754 y := v_1
25755 v.reset(OpAMD64SETEQF)
25756 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
25757 v0.AddArg2(x, y)
25758 v.AddArg(v0)
25759 return true
25760 }
25761 }
25762 func rewriteValueAMD64_OpEq8(v *Value) bool {
25763 v_1 := v.Args[1]
25764 v_0 := v.Args[0]
25765 b := v.Block
25766
25767
25768 for {
25769 x := v_0
25770 y := v_1
25771 v.reset(OpAMD64SETEQ)
25772 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25773 v0.AddArg2(x, y)
25774 v.AddArg(v0)
25775 return true
25776 }
25777 }
25778 func rewriteValueAMD64_OpEqB(v *Value) bool {
25779 v_1 := v.Args[1]
25780 v_0 := v.Args[0]
25781 b := v.Block
25782
25783
25784 for {
25785 x := v_0
25786 y := v_1
25787 v.reset(OpAMD64SETEQ)
25788 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25789 v0.AddArg2(x, y)
25790 v.AddArg(v0)
25791 return true
25792 }
25793 }
25794 func rewriteValueAMD64_OpEqPtr(v *Value) bool {
25795 v_1 := v.Args[1]
25796 v_0 := v.Args[0]
25797 b := v.Block
25798
25799
25800 for {
25801 x := v_0
25802 y := v_1
25803 v.reset(OpAMD64SETEQ)
25804 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25805 v0.AddArg2(x, y)
25806 v.AddArg(v0)
25807 return true
25808 }
25809 }
25810 func rewriteValueAMD64_OpFMA(v *Value) bool {
25811 v_2 := v.Args[2]
25812 v_1 := v.Args[1]
25813 v_0 := v.Args[0]
25814
25815
25816 for {
25817 x := v_0
25818 y := v_1
25819 z := v_2
25820 v.reset(OpAMD64VFMADD231SD)
25821 v.AddArg3(z, x, y)
25822 return true
25823 }
25824 }
25825 func rewriteValueAMD64_OpFloor(v *Value) bool {
25826 v_0 := v.Args[0]
25827
25828
25829 for {
25830 x := v_0
25831 v.reset(OpAMD64ROUNDSD)
25832 v.AuxInt = int8ToAuxInt(1)
25833 v.AddArg(x)
25834 return true
25835 }
25836 }
25837 func rewriteValueAMD64_OpGetG(v *Value) bool {
25838 v_0 := v.Args[0]
25839
25840
25841
25842 for {
25843 mem := v_0
25844 if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
25845 break
25846 }
25847 v.reset(OpAMD64LoweredGetG)
25848 v.AddArg(mem)
25849 return true
25850 }
25851 return false
25852 }
25853 func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
25854 b := v.Block
25855 typ := &b.Func.Config.Types
25856
25857
25858 for {
25859 s := auxToSym(v.Aux)
25860 v.reset(OpAMD64SETNE)
25861 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
25862 v0.AuxInt = int32ToAuxInt(0)
25863 v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
25864 v1.Aux = symToAux(s)
25865 v0.AddArg(v1)
25866 v.AddArg(v0)
25867 return true
25868 }
25869 }
25870 func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
25871 v_1 := v.Args[1]
25872 v_0 := v.Args[0]
25873 b := v.Block
25874
25875
25876 for {
25877 idx := v_0
25878 len := v_1
25879 v.reset(OpAMD64SETB)
25880 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25881 v0.AddArg2(idx, len)
25882 v.AddArg(v0)
25883 return true
25884 }
25885 }
25886 func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
25887 v_0 := v.Args[0]
25888 b := v.Block
25889
25890
25891 for {
25892 p := v_0
25893 v.reset(OpAMD64SETNE)
25894 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
25895 v0.AddArg2(p, p)
25896 v.AddArg(v0)
25897 return true
25898 }
25899 }
25900 func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
25901 v_1 := v.Args[1]
25902 v_0 := v.Args[0]
25903 b := v.Block
25904
25905
25906 for {
25907 idx := v_0
25908 len := v_1
25909 v.reset(OpAMD64SETBE)
25910 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25911 v0.AddArg2(idx, len)
25912 v.AddArg(v0)
25913 return true
25914 }
25915 }
25916 func rewriteValueAMD64_OpLeq16(v *Value) bool {
25917 v_1 := v.Args[1]
25918 v_0 := v.Args[0]
25919 b := v.Block
25920
25921
25922 for {
25923 x := v_0
25924 y := v_1
25925 v.reset(OpAMD64SETLE)
25926 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25927 v0.AddArg2(x, y)
25928 v.AddArg(v0)
25929 return true
25930 }
25931 }
25932 func rewriteValueAMD64_OpLeq16U(v *Value) bool {
25933 v_1 := v.Args[1]
25934 v_0 := v.Args[0]
25935 b := v.Block
25936
25937
25938 for {
25939 x := v_0
25940 y := v_1
25941 v.reset(OpAMD64SETBE)
25942 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25943 v0.AddArg2(x, y)
25944 v.AddArg(v0)
25945 return true
25946 }
25947 }
25948 func rewriteValueAMD64_OpLeq32(v *Value) bool {
25949 v_1 := v.Args[1]
25950 v_0 := v.Args[0]
25951 b := v.Block
25952
25953
25954 for {
25955 x := v_0
25956 y := v_1
25957 v.reset(OpAMD64SETLE)
25958 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25959 v0.AddArg2(x, y)
25960 v.AddArg(v0)
25961 return true
25962 }
25963 }
25964 func rewriteValueAMD64_OpLeq32F(v *Value) bool {
25965 v_1 := v.Args[1]
25966 v_0 := v.Args[0]
25967 b := v.Block
25968
25969
25970 for {
25971 x := v_0
25972 y := v_1
25973 v.reset(OpAMD64SETGEF)
25974 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
25975 v0.AddArg2(y, x)
25976 v.AddArg(v0)
25977 return true
25978 }
25979 }
25980 func rewriteValueAMD64_OpLeq32U(v *Value) bool {
25981 v_1 := v.Args[1]
25982 v_0 := v.Args[0]
25983 b := v.Block
25984
25985
25986 for {
25987 x := v_0
25988 y := v_1
25989 v.reset(OpAMD64SETBE)
25990 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25991 v0.AddArg2(x, y)
25992 v.AddArg(v0)
25993 return true
25994 }
25995 }
25996 func rewriteValueAMD64_OpLeq64(v *Value) bool {
25997 v_1 := v.Args[1]
25998 v_0 := v.Args[0]
25999 b := v.Block
26000
26001
26002 for {
26003 x := v_0
26004 y := v_1
26005 v.reset(OpAMD64SETLE)
26006 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26007 v0.AddArg2(x, y)
26008 v.AddArg(v0)
26009 return true
26010 }
26011 }
26012 func rewriteValueAMD64_OpLeq64F(v *Value) bool {
26013 v_1 := v.Args[1]
26014 v_0 := v.Args[0]
26015 b := v.Block
26016
26017
26018 for {
26019 x := v_0
26020 y := v_1
26021 v.reset(OpAMD64SETGEF)
26022 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
26023 v0.AddArg2(y, x)
26024 v.AddArg(v0)
26025 return true
26026 }
26027 }
26028 func rewriteValueAMD64_OpLeq64U(v *Value) bool {
26029 v_1 := v.Args[1]
26030 v_0 := v.Args[0]
26031 b := v.Block
26032
26033
26034 for {
26035 x := v_0
26036 y := v_1
26037 v.reset(OpAMD64SETBE)
26038 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26039 v0.AddArg2(x, y)
26040 v.AddArg(v0)
26041 return true
26042 }
26043 }
26044 func rewriteValueAMD64_OpLeq8(v *Value) bool {
26045 v_1 := v.Args[1]
26046 v_0 := v.Args[0]
26047 b := v.Block
26048
26049
26050 for {
26051 x := v_0
26052 y := v_1
26053 v.reset(OpAMD64SETLE)
26054 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26055 v0.AddArg2(x, y)
26056 v.AddArg(v0)
26057 return true
26058 }
26059 }
26060 func rewriteValueAMD64_OpLeq8U(v *Value) bool {
26061 v_1 := v.Args[1]
26062 v_0 := v.Args[0]
26063 b := v.Block
26064
26065
26066 for {
26067 x := v_0
26068 y := v_1
26069 v.reset(OpAMD64SETBE)
26070 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26071 v0.AddArg2(x, y)
26072 v.AddArg(v0)
26073 return true
26074 }
26075 }
26076 func rewriteValueAMD64_OpLess16(v *Value) bool {
26077 v_1 := v.Args[1]
26078 v_0 := v.Args[0]
26079 b := v.Block
26080
26081
26082 for {
26083 x := v_0
26084 y := v_1
26085 v.reset(OpAMD64SETL)
26086 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
26087 v0.AddArg2(x, y)
26088 v.AddArg(v0)
26089 return true
26090 }
26091 }
26092 func rewriteValueAMD64_OpLess16U(v *Value) bool {
26093 v_1 := v.Args[1]
26094 v_0 := v.Args[0]
26095 b := v.Block
26096
26097
26098 for {
26099 x := v_0
26100 y := v_1
26101 v.reset(OpAMD64SETB)
26102 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
26103 v0.AddArg2(x, y)
26104 v.AddArg(v0)
26105 return true
26106 }
26107 }
26108 func rewriteValueAMD64_OpLess32(v *Value) bool {
26109 v_1 := v.Args[1]
26110 v_0 := v.Args[0]
26111 b := v.Block
26112
26113
26114 for {
26115 x := v_0
26116 y := v_1
26117 v.reset(OpAMD64SETL)
26118 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
26119 v0.AddArg2(x, y)
26120 v.AddArg(v0)
26121 return true
26122 }
26123 }
26124 func rewriteValueAMD64_OpLess32F(v *Value) bool {
26125 v_1 := v.Args[1]
26126 v_0 := v.Args[0]
26127 b := v.Block
26128
26129
26130 for {
26131 x := v_0
26132 y := v_1
26133 v.reset(OpAMD64SETGF)
26134 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
26135 v0.AddArg2(y, x)
26136 v.AddArg(v0)
26137 return true
26138 }
26139 }
26140 func rewriteValueAMD64_OpLess32U(v *Value) bool {
26141 v_1 := v.Args[1]
26142 v_0 := v.Args[0]
26143 b := v.Block
26144
26145
26146 for {
26147 x := v_0
26148 y := v_1
26149 v.reset(OpAMD64SETB)
26150 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
26151 v0.AddArg2(x, y)
26152 v.AddArg(v0)
26153 return true
26154 }
26155 }
26156 func rewriteValueAMD64_OpLess64(v *Value) bool {
26157 v_1 := v.Args[1]
26158 v_0 := v.Args[0]
26159 b := v.Block
26160
26161
26162 for {
26163 x := v_0
26164 y := v_1
26165 v.reset(OpAMD64SETL)
26166 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26167 v0.AddArg2(x, y)
26168 v.AddArg(v0)
26169 return true
26170 }
26171 }
26172 func rewriteValueAMD64_OpLess64F(v *Value) bool {
26173 v_1 := v.Args[1]
26174 v_0 := v.Args[0]
26175 b := v.Block
26176
26177
26178 for {
26179 x := v_0
26180 y := v_1
26181 v.reset(OpAMD64SETGF)
26182 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
26183 v0.AddArg2(y, x)
26184 v.AddArg(v0)
26185 return true
26186 }
26187 }
26188 func rewriteValueAMD64_OpLess64U(v *Value) bool {
26189 v_1 := v.Args[1]
26190 v_0 := v.Args[0]
26191 b := v.Block
26192
26193
26194 for {
26195 x := v_0
26196 y := v_1
26197 v.reset(OpAMD64SETB)
26198 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26199 v0.AddArg2(x, y)
26200 v.AddArg(v0)
26201 return true
26202 }
26203 }
26204 func rewriteValueAMD64_OpLess8(v *Value) bool {
26205 v_1 := v.Args[1]
26206 v_0 := v.Args[0]
26207 b := v.Block
26208
26209
26210 for {
26211 x := v_0
26212 y := v_1
26213 v.reset(OpAMD64SETL)
26214 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26215 v0.AddArg2(x, y)
26216 v.AddArg(v0)
26217 return true
26218 }
26219 }
26220 func rewriteValueAMD64_OpLess8U(v *Value) bool {
26221 v_1 := v.Args[1]
26222 v_0 := v.Args[0]
26223 b := v.Block
26224
26225
26226 for {
26227 x := v_0
26228 y := v_1
26229 v.reset(OpAMD64SETB)
26230 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26231 v0.AddArg2(x, y)
26232 v.AddArg(v0)
26233 return true
26234 }
26235 }
26236 func rewriteValueAMD64_OpLoad(v *Value) bool {
26237 v_1 := v.Args[1]
26238 v_0 := v.Args[0]
26239
26240
26241
26242 for {
26243 t := v.Type
26244 ptr := v_0
26245 mem := v_1
26246 if !(is64BitInt(t) || isPtr(t)) {
26247 break
26248 }
26249 v.reset(OpAMD64MOVQload)
26250 v.AddArg2(ptr, mem)
26251 return true
26252 }
26253
26254
26255
26256 for {
26257 t := v.Type
26258 ptr := v_0
26259 mem := v_1
26260 if !(is32BitInt(t)) {
26261 break
26262 }
26263 v.reset(OpAMD64MOVLload)
26264 v.AddArg2(ptr, mem)
26265 return true
26266 }
26267
26268
26269
26270 for {
26271 t := v.Type
26272 ptr := v_0
26273 mem := v_1
26274 if !(is16BitInt(t)) {
26275 break
26276 }
26277 v.reset(OpAMD64MOVWload)
26278 v.AddArg2(ptr, mem)
26279 return true
26280 }
26281
26282
26283
26284 for {
26285 t := v.Type
26286 ptr := v_0
26287 mem := v_1
26288 if !(t.IsBoolean() || is8BitInt(t)) {
26289 break
26290 }
26291 v.reset(OpAMD64MOVBload)
26292 v.AddArg2(ptr, mem)
26293 return true
26294 }
26295
26296
26297
26298 for {
26299 t := v.Type
26300 ptr := v_0
26301 mem := v_1
26302 if !(is32BitFloat(t)) {
26303 break
26304 }
26305 v.reset(OpAMD64MOVSSload)
26306 v.AddArg2(ptr, mem)
26307 return true
26308 }
26309
26310
26311
26312 for {
26313 t := v.Type
26314 ptr := v_0
26315 mem := v_1
26316 if !(is64BitFloat(t)) {
26317 break
26318 }
26319 v.reset(OpAMD64MOVSDload)
26320 v.AddArg2(ptr, mem)
26321 return true
26322 }
26323 return false
26324 }
26325 func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
26326 v_1 := v.Args[1]
26327 v_0 := v.Args[0]
26328 b := v.Block
26329 typ := &b.Func.Config.Types
26330
26331
26332
26333 for {
26334 t := v.Type
26335 sym := auxToSym(v.Aux)
26336 base := v_0
26337 mem := v_1
26338 if !(t.Elem().HasPointers()) {
26339 break
26340 }
26341 v.reset(OpAMD64LEAQ)
26342 v.Aux = symToAux(sym)
26343 v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
26344 v0.AddArg2(base, mem)
26345 v.AddArg(v0)
26346 return true
26347 }
26348
26349
26350
26351 for {
26352 t := v.Type
26353 sym := auxToSym(v.Aux)
26354 base := v_0
26355 if !(!t.Elem().HasPointers()) {
26356 break
26357 }
26358 v.reset(OpAMD64LEAQ)
26359 v.Aux = symToAux(sym)
26360 v.AddArg(base)
26361 return true
26362 }
26363 return false
26364 }
26365 func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
26366 v_1 := v.Args[1]
26367 v_0 := v.Args[0]
26368 b := v.Block
26369
26370
26371
26372 for {
26373 t := v.Type
26374 x := v_0
26375 y := v_1
26376 if !(!shiftIsBounded(v)) {
26377 break
26378 }
26379 v.reset(OpAMD64ANDL)
26380 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26381 v0.AddArg2(x, y)
26382 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26383 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26384 v2.AuxInt = int16ToAuxInt(32)
26385 v2.AddArg(y)
26386 v1.AddArg(v2)
26387 v.AddArg2(v0, v1)
26388 return true
26389 }
26390
26391
26392
26393 for {
26394 x := v_0
26395 y := v_1
26396 if !(shiftIsBounded(v)) {
26397 break
26398 }
26399 v.reset(OpAMD64SHLL)
26400 v.AddArg2(x, y)
26401 return true
26402 }
26403 return false
26404 }
26405 func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
26406 v_1 := v.Args[1]
26407 v_0 := v.Args[0]
26408 b := v.Block
26409
26410
26411
26412 for {
26413 t := v.Type
26414 x := v_0
26415 y := v_1
26416 if !(!shiftIsBounded(v)) {
26417 break
26418 }
26419 v.reset(OpAMD64ANDL)
26420 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26421 v0.AddArg2(x, y)
26422 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26423 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26424 v2.AuxInt = int32ToAuxInt(32)
26425 v2.AddArg(y)
26426 v1.AddArg(v2)
26427 v.AddArg2(v0, v1)
26428 return true
26429 }
26430
26431
26432
26433 for {
26434 x := v_0
26435 y := v_1
26436 if !(shiftIsBounded(v)) {
26437 break
26438 }
26439 v.reset(OpAMD64SHLL)
26440 v.AddArg2(x, y)
26441 return true
26442 }
26443 return false
26444 }
26445 func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
26446 v_1 := v.Args[1]
26447 v_0 := v.Args[0]
26448 b := v.Block
26449
26450
26451
26452 for {
26453 t := v.Type
26454 x := v_0
26455 y := v_1
26456 if !(!shiftIsBounded(v)) {
26457 break
26458 }
26459 v.reset(OpAMD64ANDL)
26460 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26461 v0.AddArg2(x, y)
26462 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26463 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26464 v2.AuxInt = int32ToAuxInt(32)
26465 v2.AddArg(y)
26466 v1.AddArg(v2)
26467 v.AddArg2(v0, v1)
26468 return true
26469 }
26470
26471
26472
26473 for {
26474 x := v_0
26475 y := v_1
26476 if !(shiftIsBounded(v)) {
26477 break
26478 }
26479 v.reset(OpAMD64SHLL)
26480 v.AddArg2(x, y)
26481 return true
26482 }
26483 return false
26484 }
26485 func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
26486 v_1 := v.Args[1]
26487 v_0 := v.Args[0]
26488 b := v.Block
26489
26490
26491
26492 for {
26493 t := v.Type
26494 x := v_0
26495 y := v_1
26496 if !(!shiftIsBounded(v)) {
26497 break
26498 }
26499 v.reset(OpAMD64ANDL)
26500 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26501 v0.AddArg2(x, y)
26502 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26503 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26504 v2.AuxInt = int8ToAuxInt(32)
26505 v2.AddArg(y)
26506 v1.AddArg(v2)
26507 v.AddArg2(v0, v1)
26508 return true
26509 }
26510
26511
26512
26513 for {
26514 x := v_0
26515 y := v_1
26516 if !(shiftIsBounded(v)) {
26517 break
26518 }
26519 v.reset(OpAMD64SHLL)
26520 v.AddArg2(x, y)
26521 return true
26522 }
26523 return false
26524 }
26525 func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
26526 v_1 := v.Args[1]
26527 v_0 := v.Args[0]
26528 b := v.Block
26529
26530
26531
26532 for {
26533 t := v.Type
26534 x := v_0
26535 y := v_1
26536 if !(!shiftIsBounded(v)) {
26537 break
26538 }
26539 v.reset(OpAMD64ANDL)
26540 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26541 v0.AddArg2(x, y)
26542 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26543 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26544 v2.AuxInt = int16ToAuxInt(32)
26545 v2.AddArg(y)
26546 v1.AddArg(v2)
26547 v.AddArg2(v0, v1)
26548 return true
26549 }
26550
26551
26552
26553 for {
26554 x := v_0
26555 y := v_1
26556 if !(shiftIsBounded(v)) {
26557 break
26558 }
26559 v.reset(OpAMD64SHLL)
26560 v.AddArg2(x, y)
26561 return true
26562 }
26563 return false
26564 }
26565 func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
26566 v_1 := v.Args[1]
26567 v_0 := v.Args[0]
26568 b := v.Block
26569
26570
26571
26572 for {
26573 t := v.Type
26574 x := v_0
26575 y := v_1
26576 if !(!shiftIsBounded(v)) {
26577 break
26578 }
26579 v.reset(OpAMD64ANDL)
26580 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26581 v0.AddArg2(x, y)
26582 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26583 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26584 v2.AuxInt = int32ToAuxInt(32)
26585 v2.AddArg(y)
26586 v1.AddArg(v2)
26587 v.AddArg2(v0, v1)
26588 return true
26589 }
26590
26591
26592
26593 for {
26594 x := v_0
26595 y := v_1
26596 if !(shiftIsBounded(v)) {
26597 break
26598 }
26599 v.reset(OpAMD64SHLL)
26600 v.AddArg2(x, y)
26601 return true
26602 }
26603 return false
26604 }
26605 func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
26606 v_1 := v.Args[1]
26607 v_0 := v.Args[0]
26608 b := v.Block
26609
26610
26611
26612 for {
26613 t := v.Type
26614 x := v_0
26615 y := v_1
26616 if !(!shiftIsBounded(v)) {
26617 break
26618 }
26619 v.reset(OpAMD64ANDL)
26620 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26621 v0.AddArg2(x, y)
26622 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26623 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26624 v2.AuxInt = int32ToAuxInt(32)
26625 v2.AddArg(y)
26626 v1.AddArg(v2)
26627 v.AddArg2(v0, v1)
26628 return true
26629 }
26630
26631
26632
26633 for {
26634 x := v_0
26635 y := v_1
26636 if !(shiftIsBounded(v)) {
26637 break
26638 }
26639 v.reset(OpAMD64SHLL)
26640 v.AddArg2(x, y)
26641 return true
26642 }
26643 return false
26644 }
26645 func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
26646 v_1 := v.Args[1]
26647 v_0 := v.Args[0]
26648 b := v.Block
26649
26650
26651
26652 for {
26653 t := v.Type
26654 x := v_0
26655 y := v_1
26656 if !(!shiftIsBounded(v)) {
26657 break
26658 }
26659 v.reset(OpAMD64ANDL)
26660 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26661 v0.AddArg2(x, y)
26662 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26663 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26664 v2.AuxInt = int8ToAuxInt(32)
26665 v2.AddArg(y)
26666 v1.AddArg(v2)
26667 v.AddArg2(v0, v1)
26668 return true
26669 }
26670
26671
26672
26673 for {
26674 x := v_0
26675 y := v_1
26676 if !(shiftIsBounded(v)) {
26677 break
26678 }
26679 v.reset(OpAMD64SHLL)
26680 v.AddArg2(x, y)
26681 return true
26682 }
26683 return false
26684 }
26685 func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
26686 v_1 := v.Args[1]
26687 v_0 := v.Args[0]
26688 b := v.Block
26689
26690
26691
26692 for {
26693 t := v.Type
26694 x := v_0
26695 y := v_1
26696 if !(!shiftIsBounded(v)) {
26697 break
26698 }
26699 v.reset(OpAMD64ANDQ)
26700 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26701 v0.AddArg2(x, y)
26702 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26703 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26704 v2.AuxInt = int16ToAuxInt(64)
26705 v2.AddArg(y)
26706 v1.AddArg(v2)
26707 v.AddArg2(v0, v1)
26708 return true
26709 }
26710
26711
26712
26713 for {
26714 x := v_0
26715 y := v_1
26716 if !(shiftIsBounded(v)) {
26717 break
26718 }
26719 v.reset(OpAMD64SHLQ)
26720 v.AddArg2(x, y)
26721 return true
26722 }
26723 return false
26724 }
26725 func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
26726 v_1 := v.Args[1]
26727 v_0 := v.Args[0]
26728 b := v.Block
26729
26730
26731
26732 for {
26733 t := v.Type
26734 x := v_0
26735 y := v_1
26736 if !(!shiftIsBounded(v)) {
26737 break
26738 }
26739 v.reset(OpAMD64ANDQ)
26740 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26741 v0.AddArg2(x, y)
26742 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26743 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26744 v2.AuxInt = int32ToAuxInt(64)
26745 v2.AddArg(y)
26746 v1.AddArg(v2)
26747 v.AddArg2(v0, v1)
26748 return true
26749 }
26750
26751
26752
26753 for {
26754 x := v_0
26755 y := v_1
26756 if !(shiftIsBounded(v)) {
26757 break
26758 }
26759 v.reset(OpAMD64SHLQ)
26760 v.AddArg2(x, y)
26761 return true
26762 }
26763 return false
26764 }
26765 func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
26766 v_1 := v.Args[1]
26767 v_0 := v.Args[0]
26768 b := v.Block
26769
26770
26771
26772 for {
26773 t := v.Type
26774 x := v_0
26775 y := v_1
26776 if !(!shiftIsBounded(v)) {
26777 break
26778 }
26779 v.reset(OpAMD64ANDQ)
26780 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26781 v0.AddArg2(x, y)
26782 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26783 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26784 v2.AuxInt = int32ToAuxInt(64)
26785 v2.AddArg(y)
26786 v1.AddArg(v2)
26787 v.AddArg2(v0, v1)
26788 return true
26789 }
26790
26791
26792
26793 for {
26794 x := v_0
26795 y := v_1
26796 if !(shiftIsBounded(v)) {
26797 break
26798 }
26799 v.reset(OpAMD64SHLQ)
26800 v.AddArg2(x, y)
26801 return true
26802 }
26803 return false
26804 }
26805 func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
26806 v_1 := v.Args[1]
26807 v_0 := v.Args[0]
26808 b := v.Block
26809
26810
26811
26812 for {
26813 t := v.Type
26814 x := v_0
26815 y := v_1
26816 if !(!shiftIsBounded(v)) {
26817 break
26818 }
26819 v.reset(OpAMD64ANDQ)
26820 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26821 v0.AddArg2(x, y)
26822 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26823 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26824 v2.AuxInt = int8ToAuxInt(64)
26825 v2.AddArg(y)
26826 v1.AddArg(v2)
26827 v.AddArg2(v0, v1)
26828 return true
26829 }
26830
26831
26832
26833 for {
26834 x := v_0
26835 y := v_1
26836 if !(shiftIsBounded(v)) {
26837 break
26838 }
26839 v.reset(OpAMD64SHLQ)
26840 v.AddArg2(x, y)
26841 return true
26842 }
26843 return false
26844 }
26845 func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
26846 v_1 := v.Args[1]
26847 v_0 := v.Args[0]
26848 b := v.Block
26849
26850
26851
26852 for {
26853 t := v.Type
26854 x := v_0
26855 y := v_1
26856 if !(!shiftIsBounded(v)) {
26857 break
26858 }
26859 v.reset(OpAMD64ANDL)
26860 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26861 v0.AddArg2(x, y)
26862 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26863 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26864 v2.AuxInt = int16ToAuxInt(32)
26865 v2.AddArg(y)
26866 v1.AddArg(v2)
26867 v.AddArg2(v0, v1)
26868 return true
26869 }
26870
26871
26872
26873 for {
26874 x := v_0
26875 y := v_1
26876 if !(shiftIsBounded(v)) {
26877 break
26878 }
26879 v.reset(OpAMD64SHLL)
26880 v.AddArg2(x, y)
26881 return true
26882 }
26883 return false
26884 }
26885 func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
26886 v_1 := v.Args[1]
26887 v_0 := v.Args[0]
26888 b := v.Block
26889
26890
26891
26892 for {
26893 t := v.Type
26894 x := v_0
26895 y := v_1
26896 if !(!shiftIsBounded(v)) {
26897 break
26898 }
26899 v.reset(OpAMD64ANDL)
26900 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26901 v0.AddArg2(x, y)
26902 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26903 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26904 v2.AuxInt = int32ToAuxInt(32)
26905 v2.AddArg(y)
26906 v1.AddArg(v2)
26907 v.AddArg2(v0, v1)
26908 return true
26909 }
26910
26911
26912
26913 for {
26914 x := v_0
26915 y := v_1
26916 if !(shiftIsBounded(v)) {
26917 break
26918 }
26919 v.reset(OpAMD64SHLL)
26920 v.AddArg2(x, y)
26921 return true
26922 }
26923 return false
26924 }
26925 func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
26926 v_1 := v.Args[1]
26927 v_0 := v.Args[0]
26928 b := v.Block
26929
26930
26931
26932 for {
26933 t := v.Type
26934 x := v_0
26935 y := v_1
26936 if !(!shiftIsBounded(v)) {
26937 break
26938 }
26939 v.reset(OpAMD64ANDL)
26940 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26941 v0.AddArg2(x, y)
26942 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26943 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26944 v2.AuxInt = int32ToAuxInt(32)
26945 v2.AddArg(y)
26946 v1.AddArg(v2)
26947 v.AddArg2(v0, v1)
26948 return true
26949 }
26950
26951
26952
26953 for {
26954 x := v_0
26955 y := v_1
26956 if !(shiftIsBounded(v)) {
26957 break
26958 }
26959 v.reset(OpAMD64SHLL)
26960 v.AddArg2(x, y)
26961 return true
26962 }
26963 return false
26964 }
26965 func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
26966 v_1 := v.Args[1]
26967 v_0 := v.Args[0]
26968 b := v.Block
26969
26970
26971
26972 for {
26973 t := v.Type
26974 x := v_0
26975 y := v_1
26976 if !(!shiftIsBounded(v)) {
26977 break
26978 }
26979 v.reset(OpAMD64ANDL)
26980 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26981 v0.AddArg2(x, y)
26982 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26983 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26984 v2.AuxInt = int8ToAuxInt(32)
26985 v2.AddArg(y)
26986 v1.AddArg(v2)
26987 v.AddArg2(v0, v1)
26988 return true
26989 }
26990
26991
26992
26993 for {
26994 x := v_0
26995 y := v_1
26996 if !(shiftIsBounded(v)) {
26997 break
26998 }
26999 v.reset(OpAMD64SHLL)
27000 v.AddArg2(x, y)
27001 return true
27002 }
27003 return false
27004 }
27005 func rewriteValueAMD64_OpMax32F(v *Value) bool {
27006 v_1 := v.Args[1]
27007 v_0 := v.Args[0]
27008 b := v.Block
27009
27010
27011 for {
27012 t := v.Type
27013 x := v_0
27014 y := v_1
27015 v.reset(OpNeg32F)
27016 v.Type = t
27017 v0 := b.NewValue0(v.Pos, OpMin32F, t)
27018 v1 := b.NewValue0(v.Pos, OpNeg32F, t)
27019 v1.AddArg(x)
27020 v2 := b.NewValue0(v.Pos, OpNeg32F, t)
27021 v2.AddArg(y)
27022 v0.AddArg2(v1, v2)
27023 v.AddArg(v0)
27024 return true
27025 }
27026 }
27027 func rewriteValueAMD64_OpMax64F(v *Value) bool {
27028 v_1 := v.Args[1]
27029 v_0 := v.Args[0]
27030 b := v.Block
27031
27032
27033 for {
27034 t := v.Type
27035 x := v_0
27036 y := v_1
27037 v.reset(OpNeg64F)
27038 v.Type = t
27039 v0 := b.NewValue0(v.Pos, OpMin64F, t)
27040 v1 := b.NewValue0(v.Pos, OpNeg64F, t)
27041 v1.AddArg(x)
27042 v2 := b.NewValue0(v.Pos, OpNeg64F, t)
27043 v2.AddArg(y)
27044 v0.AddArg2(v1, v2)
27045 v.AddArg(v0)
27046 return true
27047 }
27048 }
27049 func rewriteValueAMD64_OpMin32F(v *Value) bool {
27050 v_1 := v.Args[1]
27051 v_0 := v.Args[0]
27052 b := v.Block
27053
27054
27055 for {
27056 t := v.Type
27057 x := v_0
27058 y := v_1
27059 v.reset(OpAMD64POR)
27060 v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
27061 v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
27062 v1.AddArg2(x, y)
27063 v0.AddArg2(v1, x)
27064 v.AddArg2(v0, v1)
27065 return true
27066 }
27067 }
27068 func rewriteValueAMD64_OpMin64F(v *Value) bool {
27069 v_1 := v.Args[1]
27070 v_0 := v.Args[0]
27071 b := v.Block
27072
27073
27074 for {
27075 t := v.Type
27076 x := v_0
27077 y := v_1
27078 v.reset(OpAMD64POR)
27079 v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
27080 v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
27081 v1.AddArg2(x, y)
27082 v0.AddArg2(v1, x)
27083 v.AddArg2(v0, v1)
27084 return true
27085 }
27086 }
27087 func rewriteValueAMD64_OpMod16(v *Value) bool {
27088 v_1 := v.Args[1]
27089 v_0 := v.Args[0]
27090 b := v.Block
27091 typ := &b.Func.Config.Types
27092
27093
27094 for {
27095 a := auxIntToBool(v.AuxInt)
27096 x := v_0
27097 y := v_1
27098 v.reset(OpSelect1)
27099 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27100 v0.AuxInt = boolToAuxInt(a)
27101 v0.AddArg2(x, y)
27102 v.AddArg(v0)
27103 return true
27104 }
27105 }
27106 func rewriteValueAMD64_OpMod16u(v *Value) bool {
27107 v_1 := v.Args[1]
27108 v_0 := v.Args[0]
27109 b := v.Block
27110 typ := &b.Func.Config.Types
27111
27112
27113 for {
27114 x := v_0
27115 y := v_1
27116 v.reset(OpSelect1)
27117 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27118 v0.AddArg2(x, y)
27119 v.AddArg(v0)
27120 return true
27121 }
27122 }
27123 func rewriteValueAMD64_OpMod32(v *Value) bool {
27124 v_1 := v.Args[1]
27125 v_0 := v.Args[0]
27126 b := v.Block
27127 typ := &b.Func.Config.Types
27128
27129
27130 for {
27131 a := auxIntToBool(v.AuxInt)
27132 x := v_0
27133 y := v_1
27134 v.reset(OpSelect1)
27135 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
27136 v0.AuxInt = boolToAuxInt(a)
27137 v0.AddArg2(x, y)
27138 v.AddArg(v0)
27139 return true
27140 }
27141 }
27142 func rewriteValueAMD64_OpMod32u(v *Value) bool {
27143 v_1 := v.Args[1]
27144 v_0 := v.Args[0]
27145 b := v.Block
27146 typ := &b.Func.Config.Types
27147
27148
27149 for {
27150 x := v_0
27151 y := v_1
27152 v.reset(OpSelect1)
27153 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
27154 v0.AddArg2(x, y)
27155 v.AddArg(v0)
27156 return true
27157 }
27158 }
27159 func rewriteValueAMD64_OpMod64(v *Value) bool {
27160 v_1 := v.Args[1]
27161 v_0 := v.Args[0]
27162 b := v.Block
27163 typ := &b.Func.Config.Types
27164
27165
27166 for {
27167 a := auxIntToBool(v.AuxInt)
27168 x := v_0
27169 y := v_1
27170 v.reset(OpSelect1)
27171 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
27172 v0.AuxInt = boolToAuxInt(a)
27173 v0.AddArg2(x, y)
27174 v.AddArg(v0)
27175 return true
27176 }
27177 }
27178 func rewriteValueAMD64_OpMod64u(v *Value) bool {
27179 v_1 := v.Args[1]
27180 v_0 := v.Args[0]
27181 b := v.Block
27182 typ := &b.Func.Config.Types
27183
27184
27185 for {
27186 x := v_0
27187 y := v_1
27188 v.reset(OpSelect1)
27189 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
27190 v0.AddArg2(x, y)
27191 v.AddArg(v0)
27192 return true
27193 }
27194 }
27195 func rewriteValueAMD64_OpMod8(v *Value) bool {
27196 v_1 := v.Args[1]
27197 v_0 := v.Args[0]
27198 b := v.Block
27199 typ := &b.Func.Config.Types
27200
27201
27202 for {
27203 x := v_0
27204 y := v_1
27205 v.reset(OpSelect1)
27206 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27207 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27208 v1.AddArg(x)
27209 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27210 v2.AddArg(y)
27211 v0.AddArg2(v1, v2)
27212 v.AddArg(v0)
27213 return true
27214 }
27215 }
27216 func rewriteValueAMD64_OpMod8u(v *Value) bool {
27217 v_1 := v.Args[1]
27218 v_0 := v.Args[0]
27219 b := v.Block
27220 typ := &b.Func.Config.Types
27221
27222
27223 for {
27224 x := v_0
27225 y := v_1
27226 v.reset(OpSelect1)
27227 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27228 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27229 v1.AddArg(x)
27230 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27231 v2.AddArg(y)
27232 v0.AddArg2(v1, v2)
27233 v.AddArg(v0)
27234 return true
27235 }
27236 }
27237 func rewriteValueAMD64_OpMove(v *Value) bool {
27238 v_2 := v.Args[2]
27239 v_1 := v.Args[1]
27240 v_0 := v.Args[0]
27241 b := v.Block
27242 config := b.Func.Config
27243 typ := &b.Func.Config.Types
27244
27245
27246 for {
27247 if auxIntToInt64(v.AuxInt) != 0 {
27248 break
27249 }
27250 mem := v_2
27251 v.copyOf(mem)
27252 return true
27253 }
27254
27255
27256 for {
27257 if auxIntToInt64(v.AuxInt) != 1 {
27258 break
27259 }
27260 dst := v_0
27261 src := v_1
27262 mem := v_2
27263 v.reset(OpAMD64MOVBstore)
27264 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27265 v0.AddArg2(src, mem)
27266 v.AddArg3(dst, v0, mem)
27267 return true
27268 }
27269
27270
27271 for {
27272 if auxIntToInt64(v.AuxInt) != 2 {
27273 break
27274 }
27275 dst := v_0
27276 src := v_1
27277 mem := v_2
27278 v.reset(OpAMD64MOVWstore)
27279 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27280 v0.AddArg2(src, mem)
27281 v.AddArg3(dst, v0, mem)
27282 return true
27283 }
27284
27285
27286 for {
27287 if auxIntToInt64(v.AuxInt) != 4 {
27288 break
27289 }
27290 dst := v_0
27291 src := v_1
27292 mem := v_2
27293 v.reset(OpAMD64MOVLstore)
27294 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27295 v0.AddArg2(src, mem)
27296 v.AddArg3(dst, v0, mem)
27297 return true
27298 }
27299
27300
27301 for {
27302 if auxIntToInt64(v.AuxInt) != 8 {
27303 break
27304 }
27305 dst := v_0
27306 src := v_1
27307 mem := v_2
27308 v.reset(OpAMD64MOVQstore)
27309 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27310 v0.AddArg2(src, mem)
27311 v.AddArg3(dst, v0, mem)
27312 return true
27313 }
27314
27315
27316
27317 for {
27318 if auxIntToInt64(v.AuxInt) != 16 {
27319 break
27320 }
27321 dst := v_0
27322 src := v_1
27323 mem := v_2
27324 if !(config.useSSE) {
27325 break
27326 }
27327 v.reset(OpAMD64MOVOstore)
27328 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27329 v0.AddArg2(src, mem)
27330 v.AddArg3(dst, v0, mem)
27331 return true
27332 }
27333
27334
27335
27336 for {
27337 if auxIntToInt64(v.AuxInt) != 16 {
27338 break
27339 }
27340 dst := v_0
27341 src := v_1
27342 mem := v_2
27343 if !(!config.useSSE) {
27344 break
27345 }
27346 v.reset(OpAMD64MOVQstore)
27347 v.AuxInt = int32ToAuxInt(8)
27348 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27349 v0.AuxInt = int32ToAuxInt(8)
27350 v0.AddArg2(src, mem)
27351 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27352 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27353 v2.AddArg2(src, mem)
27354 v1.AddArg3(dst, v2, mem)
27355 v.AddArg3(dst, v0, v1)
27356 return true
27357 }
27358
27359
27360 for {
27361 if auxIntToInt64(v.AuxInt) != 32 {
27362 break
27363 }
27364 dst := v_0
27365 src := v_1
27366 mem := v_2
27367 v.reset(OpMove)
27368 v.AuxInt = int64ToAuxInt(16)
27369 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27370 v0.AuxInt = int64ToAuxInt(16)
27371 v0.AddArg(dst)
27372 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27373 v1.AuxInt = int64ToAuxInt(16)
27374 v1.AddArg(src)
27375 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27376 v2.AuxInt = int64ToAuxInt(16)
27377 v2.AddArg3(dst, src, mem)
27378 v.AddArg3(v0, v1, v2)
27379 return true
27380 }
27381
27382
27383
27384 for {
27385 if auxIntToInt64(v.AuxInt) != 48 {
27386 break
27387 }
27388 dst := v_0
27389 src := v_1
27390 mem := v_2
27391 if !(config.useSSE) {
27392 break
27393 }
27394 v.reset(OpMove)
27395 v.AuxInt = int64ToAuxInt(32)
27396 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27397 v0.AuxInt = int64ToAuxInt(16)
27398 v0.AddArg(dst)
27399 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27400 v1.AuxInt = int64ToAuxInt(16)
27401 v1.AddArg(src)
27402 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27403 v2.AuxInt = int64ToAuxInt(16)
27404 v2.AddArg3(dst, src, mem)
27405 v.AddArg3(v0, v1, v2)
27406 return true
27407 }
27408
27409
27410
27411 for {
27412 if auxIntToInt64(v.AuxInt) != 64 {
27413 break
27414 }
27415 dst := v_0
27416 src := v_1
27417 mem := v_2
27418 if !(config.useSSE) {
27419 break
27420 }
27421 v.reset(OpMove)
27422 v.AuxInt = int64ToAuxInt(32)
27423 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27424 v0.AuxInt = int64ToAuxInt(32)
27425 v0.AddArg(dst)
27426 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27427 v1.AuxInt = int64ToAuxInt(32)
27428 v1.AddArg(src)
27429 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27430 v2.AuxInt = int64ToAuxInt(32)
27431 v2.AddArg3(dst, src, mem)
27432 v.AddArg3(v0, v1, v2)
27433 return true
27434 }
27435
27436
27437 for {
27438 if auxIntToInt64(v.AuxInt) != 3 {
27439 break
27440 }
27441 dst := v_0
27442 src := v_1
27443 mem := v_2
27444 v.reset(OpAMD64MOVBstore)
27445 v.AuxInt = int32ToAuxInt(2)
27446 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27447 v0.AuxInt = int32ToAuxInt(2)
27448 v0.AddArg2(src, mem)
27449 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
27450 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27451 v2.AddArg2(src, mem)
27452 v1.AddArg3(dst, v2, mem)
27453 v.AddArg3(dst, v0, v1)
27454 return true
27455 }
27456
27457
27458 for {
27459 if auxIntToInt64(v.AuxInt) != 5 {
27460 break
27461 }
27462 dst := v_0
27463 src := v_1
27464 mem := v_2
27465 v.reset(OpAMD64MOVBstore)
27466 v.AuxInt = int32ToAuxInt(4)
27467 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27468 v0.AuxInt = int32ToAuxInt(4)
27469 v0.AddArg2(src, mem)
27470 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27471 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27472 v2.AddArg2(src, mem)
27473 v1.AddArg3(dst, v2, mem)
27474 v.AddArg3(dst, v0, v1)
27475 return true
27476 }
27477
27478
27479 for {
27480 if auxIntToInt64(v.AuxInt) != 6 {
27481 break
27482 }
27483 dst := v_0
27484 src := v_1
27485 mem := v_2
27486 v.reset(OpAMD64MOVWstore)
27487 v.AuxInt = int32ToAuxInt(4)
27488 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27489 v0.AuxInt = int32ToAuxInt(4)
27490 v0.AddArg2(src, mem)
27491 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27492 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27493 v2.AddArg2(src, mem)
27494 v1.AddArg3(dst, v2, mem)
27495 v.AddArg3(dst, v0, v1)
27496 return true
27497 }
27498
27499
27500 for {
27501 if auxIntToInt64(v.AuxInt) != 7 {
27502 break
27503 }
27504 dst := v_0
27505 src := v_1
27506 mem := v_2
27507 v.reset(OpAMD64MOVLstore)
27508 v.AuxInt = int32ToAuxInt(3)
27509 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27510 v0.AuxInt = int32ToAuxInt(3)
27511 v0.AddArg2(src, mem)
27512 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27513 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27514 v2.AddArg2(src, mem)
27515 v1.AddArg3(dst, v2, mem)
27516 v.AddArg3(dst, v0, v1)
27517 return true
27518 }
27519
27520
27521 for {
27522 if auxIntToInt64(v.AuxInt) != 9 {
27523 break
27524 }
27525 dst := v_0
27526 src := v_1
27527 mem := v_2
27528 v.reset(OpAMD64MOVBstore)
27529 v.AuxInt = int32ToAuxInt(8)
27530 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27531 v0.AuxInt = int32ToAuxInt(8)
27532 v0.AddArg2(src, mem)
27533 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27534 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27535 v2.AddArg2(src, mem)
27536 v1.AddArg3(dst, v2, mem)
27537 v.AddArg3(dst, v0, v1)
27538 return true
27539 }
27540
27541
27542 for {
27543 if auxIntToInt64(v.AuxInt) != 10 {
27544 break
27545 }
27546 dst := v_0
27547 src := v_1
27548 mem := v_2
27549 v.reset(OpAMD64MOVWstore)
27550 v.AuxInt = int32ToAuxInt(8)
27551 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27552 v0.AuxInt = int32ToAuxInt(8)
27553 v0.AddArg2(src, mem)
27554 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27555 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27556 v2.AddArg2(src, mem)
27557 v1.AddArg3(dst, v2, mem)
27558 v.AddArg3(dst, v0, v1)
27559 return true
27560 }
27561
27562
27563 for {
27564 if auxIntToInt64(v.AuxInt) != 11 {
27565 break
27566 }
27567 dst := v_0
27568 src := v_1
27569 mem := v_2
27570 v.reset(OpAMD64MOVLstore)
27571 v.AuxInt = int32ToAuxInt(7)
27572 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27573 v0.AuxInt = int32ToAuxInt(7)
27574 v0.AddArg2(src, mem)
27575 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27576 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27577 v2.AddArg2(src, mem)
27578 v1.AddArg3(dst, v2, mem)
27579 v.AddArg3(dst, v0, v1)
27580 return true
27581 }
27582
27583
27584 for {
27585 if auxIntToInt64(v.AuxInt) != 12 {
27586 break
27587 }
27588 dst := v_0
27589 src := v_1
27590 mem := v_2
27591 v.reset(OpAMD64MOVLstore)
27592 v.AuxInt = int32ToAuxInt(8)
27593 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27594 v0.AuxInt = int32ToAuxInt(8)
27595 v0.AddArg2(src, mem)
27596 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27597 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27598 v2.AddArg2(src, mem)
27599 v1.AddArg3(dst, v2, mem)
27600 v.AddArg3(dst, v0, v1)
27601 return true
27602 }
27603
27604
27605
27606 for {
27607 s := auxIntToInt64(v.AuxInt)
27608 dst := v_0
27609 src := v_1
27610 mem := v_2
27611 if !(s >= 13 && s <= 15) {
27612 break
27613 }
27614 v.reset(OpAMD64MOVQstore)
27615 v.AuxInt = int32ToAuxInt(int32(s - 8))
27616 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27617 v0.AuxInt = int32ToAuxInt(int32(s - 8))
27618 v0.AddArg2(src, mem)
27619 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27620 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27621 v2.AddArg2(src, mem)
27622 v1.AddArg3(dst, v2, mem)
27623 v.AddArg3(dst, v0, v1)
27624 return true
27625 }
27626
27627
27628
27629 for {
27630 s := auxIntToInt64(v.AuxInt)
27631 dst := v_0
27632 src := v_1
27633 mem := v_2
27634 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
27635 break
27636 }
27637 v.reset(OpMove)
27638 v.AuxInt = int64ToAuxInt(s - s%16)
27639 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27640 v0.AuxInt = int64ToAuxInt(s % 16)
27641 v0.AddArg(dst)
27642 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27643 v1.AuxInt = int64ToAuxInt(s % 16)
27644 v1.AddArg(src)
27645 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27646 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27647 v3.AddArg2(src, mem)
27648 v2.AddArg3(dst, v3, mem)
27649 v.AddArg3(v0, v1, v2)
27650 return true
27651 }
27652
27653
27654
27655 for {
27656 s := auxIntToInt64(v.AuxInt)
27657 dst := v_0
27658 src := v_1
27659 mem := v_2
27660 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
27661 break
27662 }
27663 v.reset(OpMove)
27664 v.AuxInt = int64ToAuxInt(s - s%16)
27665 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27666 v0.AuxInt = int64ToAuxInt(s % 16)
27667 v0.AddArg(dst)
27668 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27669 v1.AuxInt = int64ToAuxInt(s % 16)
27670 v1.AddArg(src)
27671 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
27672 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27673 v3.AddArg2(src, mem)
27674 v2.AddArg3(dst, v3, mem)
27675 v.AddArg3(v0, v1, v2)
27676 return true
27677 }
27678
27679
27680
27681 for {
27682 s := auxIntToInt64(v.AuxInt)
27683 dst := v_0
27684 src := v_1
27685 mem := v_2
27686 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
27687 break
27688 }
27689 v.reset(OpMove)
27690 v.AuxInt = int64ToAuxInt(s - s%16)
27691 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27692 v0.AuxInt = int64ToAuxInt(s % 16)
27693 v0.AddArg(dst)
27694 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27695 v1.AuxInt = int64ToAuxInt(s % 16)
27696 v1.AddArg(src)
27697 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27698 v2.AuxInt = int32ToAuxInt(8)
27699 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27700 v3.AuxInt = int32ToAuxInt(8)
27701 v3.AddArg2(src, mem)
27702 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27703 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27704 v5.AddArg2(src, mem)
27705 v4.AddArg3(dst, v5, mem)
27706 v2.AddArg3(dst, v3, v4)
27707 v.AddArg3(v0, v1, v2)
27708 return true
27709 }
27710
27711
27712
27713 for {
27714 s := auxIntToInt64(v.AuxInt)
27715 dst := v_0
27716 src := v_1
27717 mem := v_2
27718 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
27719 break
27720 }
27721 v.reset(OpAMD64DUFFCOPY)
27722 v.AuxInt = int64ToAuxInt(s)
27723 v.AddArg3(dst, src, mem)
27724 return true
27725 }
27726
27727
27728
27729 for {
27730 s := auxIntToInt64(v.AuxInt)
27731 dst := v_0
27732 src := v_1
27733 mem := v_2
27734 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
27735 break
27736 }
27737 v.reset(OpAMD64REPMOVSQ)
27738 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27739 v0.AuxInt = int64ToAuxInt(s / 8)
27740 v.AddArg4(dst, src, v0, mem)
27741 return true
27742 }
27743 return false
27744 }
27745 func rewriteValueAMD64_OpNeg32F(v *Value) bool {
27746 v_0 := v.Args[0]
27747 b := v.Block
27748 typ := &b.Func.Config.Types
27749
27750
27751 for {
27752 x := v_0
27753 v.reset(OpAMD64PXOR)
27754 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
27755 v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
27756 v.AddArg2(x, v0)
27757 return true
27758 }
27759 }
27760 func rewriteValueAMD64_OpNeg64F(v *Value) bool {
27761 v_0 := v.Args[0]
27762 b := v.Block
27763 typ := &b.Func.Config.Types
27764
27765
27766 for {
27767 x := v_0
27768 v.reset(OpAMD64PXOR)
27769 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
27770 v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
27771 v.AddArg2(x, v0)
27772 return true
27773 }
27774 }
27775 func rewriteValueAMD64_OpNeq16(v *Value) bool {
27776 v_1 := v.Args[1]
27777 v_0 := v.Args[0]
27778 b := v.Block
27779
27780
27781 for {
27782 x := v_0
27783 y := v_1
27784 v.reset(OpAMD64SETNE)
27785 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
27786 v0.AddArg2(x, y)
27787 v.AddArg(v0)
27788 return true
27789 }
27790 }
27791 func rewriteValueAMD64_OpNeq32(v *Value) bool {
27792 v_1 := v.Args[1]
27793 v_0 := v.Args[0]
27794 b := v.Block
27795
27796
27797 for {
27798 x := v_0
27799 y := v_1
27800 v.reset(OpAMD64SETNE)
27801 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
27802 v0.AddArg2(x, y)
27803 v.AddArg(v0)
27804 return true
27805 }
27806 }
27807 func rewriteValueAMD64_OpNeq32F(v *Value) bool {
27808 v_1 := v.Args[1]
27809 v_0 := v.Args[0]
27810 b := v.Block
27811
27812
27813 for {
27814 x := v_0
27815 y := v_1
27816 v.reset(OpAMD64SETNEF)
27817 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
27818 v0.AddArg2(x, y)
27819 v.AddArg(v0)
27820 return true
27821 }
27822 }
27823 func rewriteValueAMD64_OpNeq64(v *Value) bool {
27824 v_1 := v.Args[1]
27825 v_0 := v.Args[0]
27826 b := v.Block
27827
27828
27829 for {
27830 x := v_0
27831 y := v_1
27832 v.reset(OpAMD64SETNE)
27833 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27834 v0.AddArg2(x, y)
27835 v.AddArg(v0)
27836 return true
27837 }
27838 }
27839 func rewriteValueAMD64_OpNeq64F(v *Value) bool {
27840 v_1 := v.Args[1]
27841 v_0 := v.Args[0]
27842 b := v.Block
27843
27844
27845 for {
27846 x := v_0
27847 y := v_1
27848 v.reset(OpAMD64SETNEF)
27849 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
27850 v0.AddArg2(x, y)
27851 v.AddArg(v0)
27852 return true
27853 }
27854 }
27855 func rewriteValueAMD64_OpNeq8(v *Value) bool {
27856 v_1 := v.Args[1]
27857 v_0 := v.Args[0]
27858 b := v.Block
27859
27860
27861 for {
27862 x := v_0
27863 y := v_1
27864 v.reset(OpAMD64SETNE)
27865 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27866 v0.AddArg2(x, y)
27867 v.AddArg(v0)
27868 return true
27869 }
27870 }
27871 func rewriteValueAMD64_OpNeqB(v *Value) bool {
27872 v_1 := v.Args[1]
27873 v_0 := v.Args[0]
27874 b := v.Block
27875
27876
27877 for {
27878 x := v_0
27879 y := v_1
27880 v.reset(OpAMD64SETNE)
27881 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27882 v0.AddArg2(x, y)
27883 v.AddArg(v0)
27884 return true
27885 }
27886 }
27887 func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
27888 v_1 := v.Args[1]
27889 v_0 := v.Args[0]
27890 b := v.Block
27891
27892
27893 for {
27894 x := v_0
27895 y := v_1
27896 v.reset(OpAMD64SETNE)
27897 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27898 v0.AddArg2(x, y)
27899 v.AddArg(v0)
27900 return true
27901 }
27902 }
27903 func rewriteValueAMD64_OpNot(v *Value) bool {
27904 v_0 := v.Args[0]
27905
27906
27907 for {
27908 x := v_0
27909 v.reset(OpAMD64XORLconst)
27910 v.AuxInt = int32ToAuxInt(1)
27911 v.AddArg(x)
27912 return true
27913 }
27914 }
27915 func rewriteValueAMD64_OpOffPtr(v *Value) bool {
27916 v_0 := v.Args[0]
27917 b := v.Block
27918 typ := &b.Func.Config.Types
27919
27920
27921
27922 for {
27923 off := auxIntToInt64(v.AuxInt)
27924 ptr := v_0
27925 if !(is32Bit(off)) {
27926 break
27927 }
27928 v.reset(OpAMD64ADDQconst)
27929 v.AuxInt = int32ToAuxInt(int32(off))
27930 v.AddArg(ptr)
27931 return true
27932 }
27933
27934
27935 for {
27936 off := auxIntToInt64(v.AuxInt)
27937 ptr := v_0
27938 v.reset(OpAMD64ADDQ)
27939 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27940 v0.AuxInt = int64ToAuxInt(off)
27941 v.AddArg2(v0, ptr)
27942 return true
27943 }
27944 }
27945 func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
27946 v_2 := v.Args[2]
27947 v_1 := v.Args[1]
27948 v_0 := v.Args[0]
27949
27950
27951
27952 for {
27953 kind := auxIntToInt64(v.AuxInt)
27954 x := v_0
27955 y := v_1
27956 mem := v_2
27957 if !(boundsABI(kind) == 0) {
27958 break
27959 }
27960 v.reset(OpAMD64LoweredPanicBoundsA)
27961 v.AuxInt = int64ToAuxInt(kind)
27962 v.AddArg3(x, y, mem)
27963 return true
27964 }
27965
27966
27967
27968 for {
27969 kind := auxIntToInt64(v.AuxInt)
27970 x := v_0
27971 y := v_1
27972 mem := v_2
27973 if !(boundsABI(kind) == 1) {
27974 break
27975 }
27976 v.reset(OpAMD64LoweredPanicBoundsB)
27977 v.AuxInt = int64ToAuxInt(kind)
27978 v.AddArg3(x, y, mem)
27979 return true
27980 }
27981
27982
27983
27984 for {
27985 kind := auxIntToInt64(v.AuxInt)
27986 x := v_0
27987 y := v_1
27988 mem := v_2
27989 if !(boundsABI(kind) == 2) {
27990 break
27991 }
27992 v.reset(OpAMD64LoweredPanicBoundsC)
27993 v.AuxInt = int64ToAuxInt(kind)
27994 v.AddArg3(x, y, mem)
27995 return true
27996 }
27997 return false
27998 }
27999 func rewriteValueAMD64_OpPopCount16(v *Value) bool {
28000 v_0 := v.Args[0]
28001 b := v.Block
28002 typ := &b.Func.Config.Types
28003
28004
28005 for {
28006 x := v_0
28007 v.reset(OpAMD64POPCNTL)
28008 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
28009 v0.AddArg(x)
28010 v.AddArg(v0)
28011 return true
28012 }
28013 }
28014 func rewriteValueAMD64_OpPopCount8(v *Value) bool {
28015 v_0 := v.Args[0]
28016 b := v.Block
28017 typ := &b.Func.Config.Types
28018
28019
28020 for {
28021 x := v_0
28022 v.reset(OpAMD64POPCNTL)
28023 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
28024 v0.AddArg(x)
28025 v.AddArg(v0)
28026 return true
28027 }
28028 }
28029 func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
28030 v_0 := v.Args[0]
28031
28032
28033 for {
28034 x := v_0
28035 v.reset(OpAMD64ROUNDSD)
28036 v.AuxInt = int8ToAuxInt(0)
28037 v.AddArg(x)
28038 return true
28039 }
28040 }
28041 func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
28042 v_1 := v.Args[1]
28043 v_0 := v.Args[0]
28044 b := v.Block
28045
28046
28047
28048 for {
28049 t := v.Type
28050 x := v_0
28051 y := v_1
28052 if !(!shiftIsBounded(v)) {
28053 break
28054 }
28055 v.reset(OpAMD64ANDL)
28056 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28057 v0.AddArg2(x, y)
28058 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28059 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28060 v2.AuxInt = int16ToAuxInt(16)
28061 v2.AddArg(y)
28062 v1.AddArg(v2)
28063 v.AddArg2(v0, v1)
28064 return true
28065 }
28066
28067
28068
28069 for {
28070 x := v_0
28071 y := v_1
28072 if !(shiftIsBounded(v)) {
28073 break
28074 }
28075 v.reset(OpAMD64SHRW)
28076 v.AddArg2(x, y)
28077 return true
28078 }
28079 return false
28080 }
28081 func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
28082 v_1 := v.Args[1]
28083 v_0 := v.Args[0]
28084 b := v.Block
28085
28086
28087
28088 for {
28089 t := v.Type
28090 x := v_0
28091 y := v_1
28092 if !(!shiftIsBounded(v)) {
28093 break
28094 }
28095 v.reset(OpAMD64ANDL)
28096 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28097 v0.AddArg2(x, y)
28098 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28099 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28100 v2.AuxInt = int32ToAuxInt(16)
28101 v2.AddArg(y)
28102 v1.AddArg(v2)
28103 v.AddArg2(v0, v1)
28104 return true
28105 }
28106
28107
28108
28109 for {
28110 x := v_0
28111 y := v_1
28112 if !(shiftIsBounded(v)) {
28113 break
28114 }
28115 v.reset(OpAMD64SHRW)
28116 v.AddArg2(x, y)
28117 return true
28118 }
28119 return false
28120 }
28121 func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
28122 v_1 := v.Args[1]
28123 v_0 := v.Args[0]
28124 b := v.Block
28125
28126
28127
28128 for {
28129 t := v.Type
28130 x := v_0
28131 y := v_1
28132 if !(!shiftIsBounded(v)) {
28133 break
28134 }
28135 v.reset(OpAMD64ANDL)
28136 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28137 v0.AddArg2(x, y)
28138 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28139 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28140 v2.AuxInt = int32ToAuxInt(16)
28141 v2.AddArg(y)
28142 v1.AddArg(v2)
28143 v.AddArg2(v0, v1)
28144 return true
28145 }
28146
28147
28148
28149 for {
28150 x := v_0
28151 y := v_1
28152 if !(shiftIsBounded(v)) {
28153 break
28154 }
28155 v.reset(OpAMD64SHRW)
28156 v.AddArg2(x, y)
28157 return true
28158 }
28159 return false
28160 }
28161 func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
28162 v_1 := v.Args[1]
28163 v_0 := v.Args[0]
28164 b := v.Block
28165
28166
28167
28168 for {
28169 t := v.Type
28170 x := v_0
28171 y := v_1
28172 if !(!shiftIsBounded(v)) {
28173 break
28174 }
28175 v.reset(OpAMD64ANDL)
28176 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28177 v0.AddArg2(x, y)
28178 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28179 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28180 v2.AuxInt = int8ToAuxInt(16)
28181 v2.AddArg(y)
28182 v1.AddArg(v2)
28183 v.AddArg2(v0, v1)
28184 return true
28185 }
28186
28187
28188
28189 for {
28190 x := v_0
28191 y := v_1
28192 if !(shiftIsBounded(v)) {
28193 break
28194 }
28195 v.reset(OpAMD64SHRW)
28196 v.AddArg2(x, y)
28197 return true
28198 }
28199 return false
28200 }
28201 func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
28202 v_1 := v.Args[1]
28203 v_0 := v.Args[0]
28204 b := v.Block
28205
28206
28207
28208 for {
28209 t := v.Type
28210 x := v_0
28211 y := v_1
28212 if !(!shiftIsBounded(v)) {
28213 break
28214 }
28215 v.reset(OpAMD64SARW)
28216 v.Type = t
28217 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28218 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28219 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28220 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28221 v3.AuxInt = int16ToAuxInt(16)
28222 v3.AddArg(y)
28223 v2.AddArg(v3)
28224 v1.AddArg(v2)
28225 v0.AddArg2(y, v1)
28226 v.AddArg2(x, v0)
28227 return true
28228 }
28229
28230
28231
28232 for {
28233 x := v_0
28234 y := v_1
28235 if !(shiftIsBounded(v)) {
28236 break
28237 }
28238 v.reset(OpAMD64SARW)
28239 v.AddArg2(x, y)
28240 return true
28241 }
28242 return false
28243 }
28244 func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
28245 v_1 := v.Args[1]
28246 v_0 := v.Args[0]
28247 b := v.Block
28248
28249
28250
28251 for {
28252 t := v.Type
28253 x := v_0
28254 y := v_1
28255 if !(!shiftIsBounded(v)) {
28256 break
28257 }
28258 v.reset(OpAMD64SARW)
28259 v.Type = t
28260 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28261 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28262 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28263 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28264 v3.AuxInt = int32ToAuxInt(16)
28265 v3.AddArg(y)
28266 v2.AddArg(v3)
28267 v1.AddArg(v2)
28268 v0.AddArg2(y, v1)
28269 v.AddArg2(x, v0)
28270 return true
28271 }
28272
28273
28274
28275 for {
28276 x := v_0
28277 y := v_1
28278 if !(shiftIsBounded(v)) {
28279 break
28280 }
28281 v.reset(OpAMD64SARW)
28282 v.AddArg2(x, y)
28283 return true
28284 }
28285 return false
28286 }
28287 func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
28288 v_1 := v.Args[1]
28289 v_0 := v.Args[0]
28290 b := v.Block
28291
28292
28293
28294 for {
28295 t := v.Type
28296 x := v_0
28297 y := v_1
28298 if !(!shiftIsBounded(v)) {
28299 break
28300 }
28301 v.reset(OpAMD64SARW)
28302 v.Type = t
28303 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28304 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28305 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28306 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28307 v3.AuxInt = int32ToAuxInt(16)
28308 v3.AddArg(y)
28309 v2.AddArg(v3)
28310 v1.AddArg(v2)
28311 v0.AddArg2(y, v1)
28312 v.AddArg2(x, v0)
28313 return true
28314 }
28315
28316
28317
28318 for {
28319 x := v_0
28320 y := v_1
28321 if !(shiftIsBounded(v)) {
28322 break
28323 }
28324 v.reset(OpAMD64SARW)
28325 v.AddArg2(x, y)
28326 return true
28327 }
28328 return false
28329 }
28330 func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
28331 v_1 := v.Args[1]
28332 v_0 := v.Args[0]
28333 b := v.Block
28334
28335
28336
28337 for {
28338 t := v.Type
28339 x := v_0
28340 y := v_1
28341 if !(!shiftIsBounded(v)) {
28342 break
28343 }
28344 v.reset(OpAMD64SARW)
28345 v.Type = t
28346 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28347 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28348 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28349 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28350 v3.AuxInt = int8ToAuxInt(16)
28351 v3.AddArg(y)
28352 v2.AddArg(v3)
28353 v1.AddArg(v2)
28354 v0.AddArg2(y, v1)
28355 v.AddArg2(x, v0)
28356 return true
28357 }
28358
28359
28360
28361 for {
28362 x := v_0
28363 y := v_1
28364 if !(shiftIsBounded(v)) {
28365 break
28366 }
28367 v.reset(OpAMD64SARW)
28368 v.AddArg2(x, y)
28369 return true
28370 }
28371 return false
28372 }
28373 func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
28374 v_1 := v.Args[1]
28375 v_0 := v.Args[0]
28376 b := v.Block
28377
28378
28379
28380 for {
28381 t := v.Type
28382 x := v_0
28383 y := v_1
28384 if !(!shiftIsBounded(v)) {
28385 break
28386 }
28387 v.reset(OpAMD64ANDL)
28388 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28389 v0.AddArg2(x, y)
28390 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28391 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28392 v2.AuxInt = int16ToAuxInt(32)
28393 v2.AddArg(y)
28394 v1.AddArg(v2)
28395 v.AddArg2(v0, v1)
28396 return true
28397 }
28398
28399
28400
28401 for {
28402 x := v_0
28403 y := v_1
28404 if !(shiftIsBounded(v)) {
28405 break
28406 }
28407 v.reset(OpAMD64SHRL)
28408 v.AddArg2(x, y)
28409 return true
28410 }
28411 return false
28412 }
28413 func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
28414 v_1 := v.Args[1]
28415 v_0 := v.Args[0]
28416 b := v.Block
28417
28418
28419
28420 for {
28421 t := v.Type
28422 x := v_0
28423 y := v_1
28424 if !(!shiftIsBounded(v)) {
28425 break
28426 }
28427 v.reset(OpAMD64ANDL)
28428 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28429 v0.AddArg2(x, y)
28430 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28431 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28432 v2.AuxInt = int32ToAuxInt(32)
28433 v2.AddArg(y)
28434 v1.AddArg(v2)
28435 v.AddArg2(v0, v1)
28436 return true
28437 }
28438
28439
28440
28441 for {
28442 x := v_0
28443 y := v_1
28444 if !(shiftIsBounded(v)) {
28445 break
28446 }
28447 v.reset(OpAMD64SHRL)
28448 v.AddArg2(x, y)
28449 return true
28450 }
28451 return false
28452 }
28453 func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
28454 v_1 := v.Args[1]
28455 v_0 := v.Args[0]
28456 b := v.Block
28457
28458
28459
28460 for {
28461 t := v.Type
28462 x := v_0
28463 y := v_1
28464 if !(!shiftIsBounded(v)) {
28465 break
28466 }
28467 v.reset(OpAMD64ANDL)
28468 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28469 v0.AddArg2(x, y)
28470 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28471 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28472 v2.AuxInt = int32ToAuxInt(32)
28473 v2.AddArg(y)
28474 v1.AddArg(v2)
28475 v.AddArg2(v0, v1)
28476 return true
28477 }
28478
28479
28480
28481 for {
28482 x := v_0
28483 y := v_1
28484 if !(shiftIsBounded(v)) {
28485 break
28486 }
28487 v.reset(OpAMD64SHRL)
28488 v.AddArg2(x, y)
28489 return true
28490 }
28491 return false
28492 }
28493 func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
28494 v_1 := v.Args[1]
28495 v_0 := v.Args[0]
28496 b := v.Block
28497
28498
28499
28500 for {
28501 t := v.Type
28502 x := v_0
28503 y := v_1
28504 if !(!shiftIsBounded(v)) {
28505 break
28506 }
28507 v.reset(OpAMD64ANDL)
28508 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28509 v0.AddArg2(x, y)
28510 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28511 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28512 v2.AuxInt = int8ToAuxInt(32)
28513 v2.AddArg(y)
28514 v1.AddArg(v2)
28515 v.AddArg2(v0, v1)
28516 return true
28517 }
28518
28519
28520
28521 for {
28522 x := v_0
28523 y := v_1
28524 if !(shiftIsBounded(v)) {
28525 break
28526 }
28527 v.reset(OpAMD64SHRL)
28528 v.AddArg2(x, y)
28529 return true
28530 }
28531 return false
28532 }
28533 func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
28534 v_1 := v.Args[1]
28535 v_0 := v.Args[0]
28536 b := v.Block
28537
28538
28539
28540 for {
28541 t := v.Type
28542 x := v_0
28543 y := v_1
28544 if !(!shiftIsBounded(v)) {
28545 break
28546 }
28547 v.reset(OpAMD64SARL)
28548 v.Type = t
28549 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28550 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28551 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28552 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28553 v3.AuxInt = int16ToAuxInt(32)
28554 v3.AddArg(y)
28555 v2.AddArg(v3)
28556 v1.AddArg(v2)
28557 v0.AddArg2(y, v1)
28558 v.AddArg2(x, v0)
28559 return true
28560 }
28561
28562
28563
28564 for {
28565 x := v_0
28566 y := v_1
28567 if !(shiftIsBounded(v)) {
28568 break
28569 }
28570 v.reset(OpAMD64SARL)
28571 v.AddArg2(x, y)
28572 return true
28573 }
28574 return false
28575 }
28576 func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
28577 v_1 := v.Args[1]
28578 v_0 := v.Args[0]
28579 b := v.Block
28580
28581
28582
28583 for {
28584 t := v.Type
28585 x := v_0
28586 y := v_1
28587 if !(!shiftIsBounded(v)) {
28588 break
28589 }
28590 v.reset(OpAMD64SARL)
28591 v.Type = t
28592 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28593 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28594 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28595 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28596 v3.AuxInt = int32ToAuxInt(32)
28597 v3.AddArg(y)
28598 v2.AddArg(v3)
28599 v1.AddArg(v2)
28600 v0.AddArg2(y, v1)
28601 v.AddArg2(x, v0)
28602 return true
28603 }
28604
28605
28606
28607 for {
28608 x := v_0
28609 y := v_1
28610 if !(shiftIsBounded(v)) {
28611 break
28612 }
28613 v.reset(OpAMD64SARL)
28614 v.AddArg2(x, y)
28615 return true
28616 }
28617 return false
28618 }
28619 func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
28620 v_1 := v.Args[1]
28621 v_0 := v.Args[0]
28622 b := v.Block
28623
28624
28625
28626 for {
28627 t := v.Type
28628 x := v_0
28629 y := v_1
28630 if !(!shiftIsBounded(v)) {
28631 break
28632 }
28633 v.reset(OpAMD64SARL)
28634 v.Type = t
28635 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28636 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28637 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28638 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28639 v3.AuxInt = int32ToAuxInt(32)
28640 v3.AddArg(y)
28641 v2.AddArg(v3)
28642 v1.AddArg(v2)
28643 v0.AddArg2(y, v1)
28644 v.AddArg2(x, v0)
28645 return true
28646 }
28647
28648
28649
28650 for {
28651 x := v_0
28652 y := v_1
28653 if !(shiftIsBounded(v)) {
28654 break
28655 }
28656 v.reset(OpAMD64SARL)
28657 v.AddArg2(x, y)
28658 return true
28659 }
28660 return false
28661 }
28662 func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
28663 v_1 := v.Args[1]
28664 v_0 := v.Args[0]
28665 b := v.Block
28666
28667
28668
28669 for {
28670 t := v.Type
28671 x := v_0
28672 y := v_1
28673 if !(!shiftIsBounded(v)) {
28674 break
28675 }
28676 v.reset(OpAMD64SARL)
28677 v.Type = t
28678 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28679 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28680 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28681 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28682 v3.AuxInt = int8ToAuxInt(32)
28683 v3.AddArg(y)
28684 v2.AddArg(v3)
28685 v1.AddArg(v2)
28686 v0.AddArg2(y, v1)
28687 v.AddArg2(x, v0)
28688 return true
28689 }
28690
28691
28692
28693 for {
28694 x := v_0
28695 y := v_1
28696 if !(shiftIsBounded(v)) {
28697 break
28698 }
28699 v.reset(OpAMD64SARL)
28700 v.AddArg2(x, y)
28701 return true
28702 }
28703 return false
28704 }
28705 func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
28706 v_1 := v.Args[1]
28707 v_0 := v.Args[0]
28708 b := v.Block
28709
28710
28711
28712 for {
28713 t := v.Type
28714 x := v_0
28715 y := v_1
28716 if !(!shiftIsBounded(v)) {
28717 break
28718 }
28719 v.reset(OpAMD64ANDQ)
28720 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28721 v0.AddArg2(x, y)
28722 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28723 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28724 v2.AuxInt = int16ToAuxInt(64)
28725 v2.AddArg(y)
28726 v1.AddArg(v2)
28727 v.AddArg2(v0, v1)
28728 return true
28729 }
28730
28731
28732
28733 for {
28734 x := v_0
28735 y := v_1
28736 if !(shiftIsBounded(v)) {
28737 break
28738 }
28739 v.reset(OpAMD64SHRQ)
28740 v.AddArg2(x, y)
28741 return true
28742 }
28743 return false
28744 }
28745 func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
28746 v_1 := v.Args[1]
28747 v_0 := v.Args[0]
28748 b := v.Block
28749
28750
28751
28752 for {
28753 t := v.Type
28754 x := v_0
28755 y := v_1
28756 if !(!shiftIsBounded(v)) {
28757 break
28758 }
28759 v.reset(OpAMD64ANDQ)
28760 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28761 v0.AddArg2(x, y)
28762 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28763 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28764 v2.AuxInt = int32ToAuxInt(64)
28765 v2.AddArg(y)
28766 v1.AddArg(v2)
28767 v.AddArg2(v0, v1)
28768 return true
28769 }
28770
28771
28772
28773 for {
28774 x := v_0
28775 y := v_1
28776 if !(shiftIsBounded(v)) {
28777 break
28778 }
28779 v.reset(OpAMD64SHRQ)
28780 v.AddArg2(x, y)
28781 return true
28782 }
28783 return false
28784 }
28785 func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
28786 v_1 := v.Args[1]
28787 v_0 := v.Args[0]
28788 b := v.Block
28789
28790
28791
28792 for {
28793 t := v.Type
28794 x := v_0
28795 y := v_1
28796 if !(!shiftIsBounded(v)) {
28797 break
28798 }
28799 v.reset(OpAMD64ANDQ)
28800 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28801 v0.AddArg2(x, y)
28802 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28803 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28804 v2.AuxInt = int32ToAuxInt(64)
28805 v2.AddArg(y)
28806 v1.AddArg(v2)
28807 v.AddArg2(v0, v1)
28808 return true
28809 }
28810
28811
28812
28813 for {
28814 x := v_0
28815 y := v_1
28816 if !(shiftIsBounded(v)) {
28817 break
28818 }
28819 v.reset(OpAMD64SHRQ)
28820 v.AddArg2(x, y)
28821 return true
28822 }
28823 return false
28824 }
28825 func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
28826 v_1 := v.Args[1]
28827 v_0 := v.Args[0]
28828 b := v.Block
28829
28830
28831
28832 for {
28833 t := v.Type
28834 x := v_0
28835 y := v_1
28836 if !(!shiftIsBounded(v)) {
28837 break
28838 }
28839 v.reset(OpAMD64ANDQ)
28840 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28841 v0.AddArg2(x, y)
28842 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28843 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28844 v2.AuxInt = int8ToAuxInt(64)
28845 v2.AddArg(y)
28846 v1.AddArg(v2)
28847 v.AddArg2(v0, v1)
28848 return true
28849 }
28850
28851
28852
28853 for {
28854 x := v_0
28855 y := v_1
28856 if !(shiftIsBounded(v)) {
28857 break
28858 }
28859 v.reset(OpAMD64SHRQ)
28860 v.AddArg2(x, y)
28861 return true
28862 }
28863 return false
28864 }
28865 func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
28866 v_1 := v.Args[1]
28867 v_0 := v.Args[0]
28868 b := v.Block
28869
28870
28871
28872 for {
28873 t := v.Type
28874 x := v_0
28875 y := v_1
28876 if !(!shiftIsBounded(v)) {
28877 break
28878 }
28879 v.reset(OpAMD64SARQ)
28880 v.Type = t
28881 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28882 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28883 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28884 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28885 v3.AuxInt = int16ToAuxInt(64)
28886 v3.AddArg(y)
28887 v2.AddArg(v3)
28888 v1.AddArg(v2)
28889 v0.AddArg2(y, v1)
28890 v.AddArg2(x, v0)
28891 return true
28892 }
28893
28894
28895
28896 for {
28897 x := v_0
28898 y := v_1
28899 if !(shiftIsBounded(v)) {
28900 break
28901 }
28902 v.reset(OpAMD64SARQ)
28903 v.AddArg2(x, y)
28904 return true
28905 }
28906 return false
28907 }
28908 func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
28909 v_1 := v.Args[1]
28910 v_0 := v.Args[0]
28911 b := v.Block
28912
28913
28914
28915 for {
28916 t := v.Type
28917 x := v_0
28918 y := v_1
28919 if !(!shiftIsBounded(v)) {
28920 break
28921 }
28922 v.reset(OpAMD64SARQ)
28923 v.Type = t
28924 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28925 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28926 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28927 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28928 v3.AuxInt = int32ToAuxInt(64)
28929 v3.AddArg(y)
28930 v2.AddArg(v3)
28931 v1.AddArg(v2)
28932 v0.AddArg2(y, v1)
28933 v.AddArg2(x, v0)
28934 return true
28935 }
28936
28937
28938
28939 for {
28940 x := v_0
28941 y := v_1
28942 if !(shiftIsBounded(v)) {
28943 break
28944 }
28945 v.reset(OpAMD64SARQ)
28946 v.AddArg2(x, y)
28947 return true
28948 }
28949 return false
28950 }
28951 func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
28952 v_1 := v.Args[1]
28953 v_0 := v.Args[0]
28954 b := v.Block
28955
28956
28957
28958 for {
28959 t := v.Type
28960 x := v_0
28961 y := v_1
28962 if !(!shiftIsBounded(v)) {
28963 break
28964 }
28965 v.reset(OpAMD64SARQ)
28966 v.Type = t
28967 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28968 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28969 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28970 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28971 v3.AuxInt = int32ToAuxInt(64)
28972 v3.AddArg(y)
28973 v2.AddArg(v3)
28974 v1.AddArg(v2)
28975 v0.AddArg2(y, v1)
28976 v.AddArg2(x, v0)
28977 return true
28978 }
28979
28980
28981
28982 for {
28983 x := v_0
28984 y := v_1
28985 if !(shiftIsBounded(v)) {
28986 break
28987 }
28988 v.reset(OpAMD64SARQ)
28989 v.AddArg2(x, y)
28990 return true
28991 }
28992 return false
28993 }
28994 func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
28995 v_1 := v.Args[1]
28996 v_0 := v.Args[0]
28997 b := v.Block
28998
28999
29000
29001 for {
29002 t := v.Type
29003 x := v_0
29004 y := v_1
29005 if !(!shiftIsBounded(v)) {
29006 break
29007 }
29008 v.reset(OpAMD64SARQ)
29009 v.Type = t
29010 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29011 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29012 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29013 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29014 v3.AuxInt = int8ToAuxInt(64)
29015 v3.AddArg(y)
29016 v2.AddArg(v3)
29017 v1.AddArg(v2)
29018 v0.AddArg2(y, v1)
29019 v.AddArg2(x, v0)
29020 return true
29021 }
29022
29023
29024
29025 for {
29026 x := v_0
29027 y := v_1
29028 if !(shiftIsBounded(v)) {
29029 break
29030 }
29031 v.reset(OpAMD64SARQ)
29032 v.AddArg2(x, y)
29033 return true
29034 }
29035 return false
29036 }
29037 func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
29038 v_1 := v.Args[1]
29039 v_0 := v.Args[0]
29040 b := v.Block
29041
29042
29043
29044 for {
29045 t := v.Type
29046 x := v_0
29047 y := v_1
29048 if !(!shiftIsBounded(v)) {
29049 break
29050 }
29051 v.reset(OpAMD64ANDL)
29052 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29053 v0.AddArg2(x, y)
29054 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29055 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
29056 v2.AuxInt = int16ToAuxInt(8)
29057 v2.AddArg(y)
29058 v1.AddArg(v2)
29059 v.AddArg2(v0, v1)
29060 return true
29061 }
29062
29063
29064
29065 for {
29066 x := v_0
29067 y := v_1
29068 if !(shiftIsBounded(v)) {
29069 break
29070 }
29071 v.reset(OpAMD64SHRB)
29072 v.AddArg2(x, y)
29073 return true
29074 }
29075 return false
29076 }
29077 func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
29078 v_1 := v.Args[1]
29079 v_0 := v.Args[0]
29080 b := v.Block
29081
29082
29083
29084 for {
29085 t := v.Type
29086 x := v_0
29087 y := v_1
29088 if !(!shiftIsBounded(v)) {
29089 break
29090 }
29091 v.reset(OpAMD64ANDL)
29092 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29093 v0.AddArg2(x, y)
29094 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29095 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
29096 v2.AuxInt = int32ToAuxInt(8)
29097 v2.AddArg(y)
29098 v1.AddArg(v2)
29099 v.AddArg2(v0, v1)
29100 return true
29101 }
29102
29103
29104
29105 for {
29106 x := v_0
29107 y := v_1
29108 if !(shiftIsBounded(v)) {
29109 break
29110 }
29111 v.reset(OpAMD64SHRB)
29112 v.AddArg2(x, y)
29113 return true
29114 }
29115 return false
29116 }
29117 func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
29118 v_1 := v.Args[1]
29119 v_0 := v.Args[0]
29120 b := v.Block
29121
29122
29123
29124 for {
29125 t := v.Type
29126 x := v_0
29127 y := v_1
29128 if !(!shiftIsBounded(v)) {
29129 break
29130 }
29131 v.reset(OpAMD64ANDL)
29132 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29133 v0.AddArg2(x, y)
29134 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29135 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29136 v2.AuxInt = int32ToAuxInt(8)
29137 v2.AddArg(y)
29138 v1.AddArg(v2)
29139 v.AddArg2(v0, v1)
29140 return true
29141 }
29142
29143
29144
29145 for {
29146 x := v_0
29147 y := v_1
29148 if !(shiftIsBounded(v)) {
29149 break
29150 }
29151 v.reset(OpAMD64SHRB)
29152 v.AddArg2(x, y)
29153 return true
29154 }
29155 return false
29156 }
29157 func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
29158 v_1 := v.Args[1]
29159 v_0 := v.Args[0]
29160 b := v.Block
29161
29162
29163
29164 for {
29165 t := v.Type
29166 x := v_0
29167 y := v_1
29168 if !(!shiftIsBounded(v)) {
29169 break
29170 }
29171 v.reset(OpAMD64ANDL)
29172 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29173 v0.AddArg2(x, y)
29174 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29175 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29176 v2.AuxInt = int8ToAuxInt(8)
29177 v2.AddArg(y)
29178 v1.AddArg(v2)
29179 v.AddArg2(v0, v1)
29180 return true
29181 }
29182
29183
29184
29185 for {
29186 x := v_0
29187 y := v_1
29188 if !(shiftIsBounded(v)) {
29189 break
29190 }
29191 v.reset(OpAMD64SHRB)
29192 v.AddArg2(x, y)
29193 return true
29194 }
29195 return false
29196 }
29197 func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
29198 v_1 := v.Args[1]
29199 v_0 := v.Args[0]
29200 b := v.Block
29201
29202
29203
29204 for {
29205 t := v.Type
29206 x := v_0
29207 y := v_1
29208 if !(!shiftIsBounded(v)) {
29209 break
29210 }
29211 v.reset(OpAMD64SARB)
29212 v.Type = t
29213 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29214 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29215 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29216 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
29217 v3.AuxInt = int16ToAuxInt(8)
29218 v3.AddArg(y)
29219 v2.AddArg(v3)
29220 v1.AddArg(v2)
29221 v0.AddArg2(y, v1)
29222 v.AddArg2(x, v0)
29223 return true
29224 }
29225
29226
29227
29228 for {
29229 x := v_0
29230 y := v_1
29231 if !(shiftIsBounded(v)) {
29232 break
29233 }
29234 v.reset(OpAMD64SARB)
29235 v.AddArg2(x, y)
29236 return true
29237 }
29238 return false
29239 }
29240 func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
29241 v_1 := v.Args[1]
29242 v_0 := v.Args[0]
29243 b := v.Block
29244
29245
29246
29247 for {
29248 t := v.Type
29249 x := v_0
29250 y := v_1
29251 if !(!shiftIsBounded(v)) {
29252 break
29253 }
29254 v.reset(OpAMD64SARB)
29255 v.Type = t
29256 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29257 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29258 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29259 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
29260 v3.AuxInt = int32ToAuxInt(8)
29261 v3.AddArg(y)
29262 v2.AddArg(v3)
29263 v1.AddArg(v2)
29264 v0.AddArg2(y, v1)
29265 v.AddArg2(x, v0)
29266 return true
29267 }
29268
29269
29270
29271 for {
29272 x := v_0
29273 y := v_1
29274 if !(shiftIsBounded(v)) {
29275 break
29276 }
29277 v.reset(OpAMD64SARB)
29278 v.AddArg2(x, y)
29279 return true
29280 }
29281 return false
29282 }
29283 func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
29284 v_1 := v.Args[1]
29285 v_0 := v.Args[0]
29286 b := v.Block
29287
29288
29289
29290 for {
29291 t := v.Type
29292 x := v_0
29293 y := v_1
29294 if !(!shiftIsBounded(v)) {
29295 break
29296 }
29297 v.reset(OpAMD64SARB)
29298 v.Type = t
29299 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
29300 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
29301 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
29302 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29303 v3.AuxInt = int32ToAuxInt(8)
29304 v3.AddArg(y)
29305 v2.AddArg(v3)
29306 v1.AddArg(v2)
29307 v0.AddArg2(y, v1)
29308 v.AddArg2(x, v0)
29309 return true
29310 }
29311
29312
29313
29314 for {
29315 x := v_0
29316 y := v_1
29317 if !(shiftIsBounded(v)) {
29318 break
29319 }
29320 v.reset(OpAMD64SARB)
29321 v.AddArg2(x, y)
29322 return true
29323 }
29324 return false
29325 }
29326 func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
29327 v_1 := v.Args[1]
29328 v_0 := v.Args[0]
29329 b := v.Block
29330
29331
29332
29333 for {
29334 t := v.Type
29335 x := v_0
29336 y := v_1
29337 if !(!shiftIsBounded(v)) {
29338 break
29339 }
29340 v.reset(OpAMD64SARB)
29341 v.Type = t
29342 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29343 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29344 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29345 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29346 v3.AuxInt = int8ToAuxInt(8)
29347 v3.AddArg(y)
29348 v2.AddArg(v3)
29349 v1.AddArg(v2)
29350 v0.AddArg2(y, v1)
29351 v.AddArg2(x, v0)
29352 return true
29353 }
29354
29355
29356
29357 for {
29358 x := v_0
29359 y := v_1
29360 if !(shiftIsBounded(v)) {
29361 break
29362 }
29363 v.reset(OpAMD64SARB)
29364 v.AddArg2(x, y)
29365 return true
29366 }
29367 return false
29368 }
29369 func rewriteValueAMD64_OpSelect0(v *Value) bool {
29370 v_0 := v.Args[0]
29371 b := v.Block
29372 typ := &b.Func.Config.Types
29373
29374
29375 for {
29376 if v_0.Op != OpMul64uover {
29377 break
29378 }
29379 y := v_0.Args[1]
29380 x := v_0.Args[0]
29381 v.reset(OpSelect0)
29382 v.Type = typ.UInt64
29383 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29384 v0.AddArg2(x, y)
29385 v.AddArg(v0)
29386 return true
29387 }
29388
29389
29390 for {
29391 if v_0.Op != OpMul32uover {
29392 break
29393 }
29394 y := v_0.Args[1]
29395 x := v_0.Args[0]
29396 v.reset(OpSelect0)
29397 v.Type = typ.UInt32
29398 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29399 v0.AddArg2(x, y)
29400 v.AddArg(v0)
29401 return true
29402 }
29403
29404
29405 for {
29406 if v_0.Op != OpAdd64carry {
29407 break
29408 }
29409 c := v_0.Args[2]
29410 x := v_0.Args[0]
29411 y := v_0.Args[1]
29412 v.reset(OpSelect0)
29413 v.Type = typ.UInt64
29414 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29415 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29416 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29417 v2.AddArg(c)
29418 v1.AddArg(v2)
29419 v0.AddArg3(x, y, v1)
29420 v.AddArg(v0)
29421 return true
29422 }
29423
29424
29425 for {
29426 if v_0.Op != OpSub64borrow {
29427 break
29428 }
29429 c := v_0.Args[2]
29430 x := v_0.Args[0]
29431 y := v_0.Args[1]
29432 v.reset(OpSelect0)
29433 v.Type = typ.UInt64
29434 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29435 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29436 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29437 v2.AddArg(c)
29438 v1.AddArg(v2)
29439 v0.AddArg3(x, y, v1)
29440 v.AddArg(v0)
29441 return true
29442 }
29443
29444
29445 for {
29446 t := v.Type
29447 if v_0.Op != OpAMD64AddTupleFirst32 {
29448 break
29449 }
29450 tuple := v_0.Args[1]
29451 val := v_0.Args[0]
29452 v.reset(OpAMD64ADDL)
29453 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29454 v0.AddArg(tuple)
29455 v.AddArg2(val, v0)
29456 return true
29457 }
29458
29459
29460 for {
29461 t := v.Type
29462 if v_0.Op != OpAMD64AddTupleFirst64 {
29463 break
29464 }
29465 tuple := v_0.Args[1]
29466 val := v_0.Args[0]
29467 v.reset(OpAMD64ADDQ)
29468 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29469 v0.AddArg(tuple)
29470 v.AddArg2(val, v0)
29471 return true
29472 }
29473 return false
29474 }
29475 func rewriteValueAMD64_OpSelect1(v *Value) bool {
29476 v_0 := v.Args[0]
29477 b := v.Block
29478 typ := &b.Func.Config.Types
29479
29480
29481 for {
29482 if v_0.Op != OpMul64uover {
29483 break
29484 }
29485 y := v_0.Args[1]
29486 x := v_0.Args[0]
29487 v.reset(OpAMD64SETO)
29488 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29489 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29490 v1.AddArg2(x, y)
29491 v0.AddArg(v1)
29492 v.AddArg(v0)
29493 return true
29494 }
29495
29496
29497 for {
29498 if v_0.Op != OpMul32uover {
29499 break
29500 }
29501 y := v_0.Args[1]
29502 x := v_0.Args[0]
29503 v.reset(OpAMD64SETO)
29504 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29505 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29506 v1.AddArg2(x, y)
29507 v0.AddArg(v1)
29508 v.AddArg(v0)
29509 return true
29510 }
29511
29512
29513 for {
29514 if v_0.Op != OpAdd64carry {
29515 break
29516 }
29517 c := v_0.Args[2]
29518 x := v_0.Args[0]
29519 y := v_0.Args[1]
29520 v.reset(OpAMD64NEGQ)
29521 v.Type = typ.UInt64
29522 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29523 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29524 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29525 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29526 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29527 v4.AddArg(c)
29528 v3.AddArg(v4)
29529 v2.AddArg3(x, y, v3)
29530 v1.AddArg(v2)
29531 v0.AddArg(v1)
29532 v.AddArg(v0)
29533 return true
29534 }
29535
29536
29537 for {
29538 if v_0.Op != OpSub64borrow {
29539 break
29540 }
29541 c := v_0.Args[2]
29542 x := v_0.Args[0]
29543 y := v_0.Args[1]
29544 v.reset(OpAMD64NEGQ)
29545 v.Type = typ.UInt64
29546 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29547 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29548 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29549 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29550 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29551 v4.AddArg(c)
29552 v3.AddArg(v4)
29553 v2.AddArg3(x, y, v3)
29554 v1.AddArg(v2)
29555 v0.AddArg(v1)
29556 v.AddArg(v0)
29557 return true
29558 }
29559
29560
29561 for {
29562 if v_0.Op != OpAMD64NEGLflags {
29563 break
29564 }
29565 v_0_0 := v_0.Args[0]
29566 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
29567 break
29568 }
29569 v.reset(OpAMD64FlagEQ)
29570 return true
29571 }
29572
29573
29574 for {
29575 if v_0.Op != OpAMD64NEGLflags {
29576 break
29577 }
29578 v_0_0 := v_0.Args[0]
29579 if v_0_0.Op != OpAMD64NEGQ {
29580 break
29581 }
29582 v_0_0_0 := v_0_0.Args[0]
29583 if v_0_0_0.Op != OpAMD64SBBQcarrymask {
29584 break
29585 }
29586 x := v_0_0_0.Args[0]
29587 v.copyOf(x)
29588 return true
29589 }
29590
29591
29592 for {
29593 if v_0.Op != OpAMD64AddTupleFirst32 {
29594 break
29595 }
29596 tuple := v_0.Args[1]
29597 v.reset(OpSelect1)
29598 v.AddArg(tuple)
29599 return true
29600 }
29601
29602
29603 for {
29604 if v_0.Op != OpAMD64AddTupleFirst64 {
29605 break
29606 }
29607 tuple := v_0.Args[1]
29608 v.reset(OpSelect1)
29609 v.AddArg(tuple)
29610 return true
29611 }
29612
29613
29614
29615 for {
29616 a := v_0
29617 if a.Op != OpAMD64LoweredAtomicAnd64 {
29618 break
29619 }
29620 mem := a.Args[2]
29621 ptr := a.Args[0]
29622 val := a.Args[1]
29623 if !(a.Uses == 1 && clobber(a)) {
29624 break
29625 }
29626 v.reset(OpAMD64ANDQlock)
29627 v.AddArg3(ptr, val, mem)
29628 return true
29629 }
29630
29631
29632
29633 for {
29634 a := v_0
29635 if a.Op != OpAMD64LoweredAtomicAnd32 {
29636 break
29637 }
29638 mem := a.Args[2]
29639 ptr := a.Args[0]
29640 val := a.Args[1]
29641 if !(a.Uses == 1 && clobber(a)) {
29642 break
29643 }
29644 v.reset(OpAMD64ANDLlock)
29645 v.AddArg3(ptr, val, mem)
29646 return true
29647 }
29648
29649
29650
29651 for {
29652 a := v_0
29653 if a.Op != OpAMD64LoweredAtomicOr64 {
29654 break
29655 }
29656 mem := a.Args[2]
29657 ptr := a.Args[0]
29658 val := a.Args[1]
29659 if !(a.Uses == 1 && clobber(a)) {
29660 break
29661 }
29662 v.reset(OpAMD64ORQlock)
29663 v.AddArg3(ptr, val, mem)
29664 return true
29665 }
29666
29667
29668
29669 for {
29670 a := v_0
29671 if a.Op != OpAMD64LoweredAtomicOr32 {
29672 break
29673 }
29674 mem := a.Args[2]
29675 ptr := a.Args[0]
29676 val := a.Args[1]
29677 if !(a.Uses == 1 && clobber(a)) {
29678 break
29679 }
29680 v.reset(OpAMD64ORLlock)
29681 v.AddArg3(ptr, val, mem)
29682 return true
29683 }
29684 return false
29685 }
29686 func rewriteValueAMD64_OpSelectN(v *Value) bool {
29687 v_0 := v.Args[0]
29688 b := v.Block
29689 config := b.Func.Config
29690
29691
29692
29693 for {
29694 if auxIntToInt64(v.AuxInt) != 0 {
29695 break
29696 }
29697 call := v_0
29698 if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
29699 break
29700 }
29701 sym := auxToCall(call.Aux)
29702 s1 := call.Args[0]
29703 if s1.Op != OpAMD64MOVQstoreconst {
29704 break
29705 }
29706 sc := auxIntToValAndOff(s1.AuxInt)
29707 _ = s1.Args[1]
29708 s2 := s1.Args[1]
29709 if s2.Op != OpAMD64MOVQstore {
29710 break
29711 }
29712 _ = s2.Args[2]
29713 src := s2.Args[1]
29714 s3 := s2.Args[2]
29715 if s3.Op != OpAMD64MOVQstore {
29716 break
29717 }
29718 mem := s3.Args[2]
29719 dst := s3.Args[1]
29720 if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
29721 break
29722 }
29723 v.reset(OpMove)
29724 v.AuxInt = int64ToAuxInt(sc.Val64())
29725 v.AddArg3(dst, src, mem)
29726 return true
29727 }
29728
29729
29730
29731 for {
29732 if auxIntToInt64(v.AuxInt) != 0 {
29733 break
29734 }
29735 call := v_0
29736 if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
29737 break
29738 }
29739 sym := auxToCall(call.Aux)
29740 mem := call.Args[3]
29741 dst := call.Args[0]
29742 src := call.Args[1]
29743 call_2 := call.Args[2]
29744 if call_2.Op != OpAMD64MOVQconst {
29745 break
29746 }
29747 sz := auxIntToInt64(call_2.AuxInt)
29748 if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
29749 break
29750 }
29751 v.reset(OpMove)
29752 v.AuxInt = int64ToAuxInt(sz)
29753 v.AddArg3(dst, src, mem)
29754 return true
29755 }
29756 return false
29757 }
29758 func rewriteValueAMD64_OpSlicemask(v *Value) bool {
29759 v_0 := v.Args[0]
29760 b := v.Block
29761
29762
29763 for {
29764 t := v.Type
29765 x := v_0
29766 v.reset(OpAMD64SARQconst)
29767 v.AuxInt = int8ToAuxInt(63)
29768 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
29769 v0.AddArg(x)
29770 v.AddArg(v0)
29771 return true
29772 }
29773 }
29774 func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
29775 v_1 := v.Args[1]
29776 v_0 := v.Args[0]
29777 b := v.Block
29778 typ := &b.Func.Config.Types
29779
29780
29781 for {
29782 x := v_0
29783 y := v_1
29784 v.reset(OpAMD64CMOVQCC)
29785 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29786 v0.AuxInt = int64ToAuxInt(0)
29787 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29788 v1.AddArg2(x, y)
29789 v.AddArg3(x, v0, v1)
29790 return true
29791 }
29792 }
29793 func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
29794 v_1 := v.Args[1]
29795 v_0 := v.Args[0]
29796 b := v.Block
29797 typ := &b.Func.Config.Types
29798
29799
29800 for {
29801 x := v_0
29802 y := v_1
29803 v.reset(OpAMD64CMOVQHI)
29804 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29805 v0.AuxInt = int64ToAuxInt(0)
29806 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29807 v1.AddArg2(x, y)
29808 v.AddArg3(x, v0, v1)
29809 return true
29810 }
29811 }
29812 func rewriteValueAMD64_OpStore(v *Value) bool {
29813 v_2 := v.Args[2]
29814 v_1 := v.Args[1]
29815 v_0 := v.Args[0]
29816
29817
29818
29819 for {
29820 t := auxToType(v.Aux)
29821 ptr := v_0
29822 val := v_1
29823 mem := v_2
29824 if !(t.Size() == 8 && t.IsFloat()) {
29825 break
29826 }
29827 v.reset(OpAMD64MOVSDstore)
29828 v.AddArg3(ptr, val, mem)
29829 return true
29830 }
29831
29832
29833
29834 for {
29835 t := auxToType(v.Aux)
29836 ptr := v_0
29837 val := v_1
29838 mem := v_2
29839 if !(t.Size() == 4 && t.IsFloat()) {
29840 break
29841 }
29842 v.reset(OpAMD64MOVSSstore)
29843 v.AddArg3(ptr, val, mem)
29844 return true
29845 }
29846
29847
29848
29849 for {
29850 t := auxToType(v.Aux)
29851 ptr := v_0
29852 val := v_1
29853 mem := v_2
29854 if !(t.Size() == 8 && !t.IsFloat()) {
29855 break
29856 }
29857 v.reset(OpAMD64MOVQstore)
29858 v.AddArg3(ptr, val, mem)
29859 return true
29860 }
29861
29862
29863
29864 for {
29865 t := auxToType(v.Aux)
29866 ptr := v_0
29867 val := v_1
29868 mem := v_2
29869 if !(t.Size() == 4 && !t.IsFloat()) {
29870 break
29871 }
29872 v.reset(OpAMD64MOVLstore)
29873 v.AddArg3(ptr, val, mem)
29874 return true
29875 }
29876
29877
29878
29879 for {
29880 t := auxToType(v.Aux)
29881 ptr := v_0
29882 val := v_1
29883 mem := v_2
29884 if !(t.Size() == 2) {
29885 break
29886 }
29887 v.reset(OpAMD64MOVWstore)
29888 v.AddArg3(ptr, val, mem)
29889 return true
29890 }
29891
29892
29893
29894 for {
29895 t := auxToType(v.Aux)
29896 ptr := v_0
29897 val := v_1
29898 mem := v_2
29899 if !(t.Size() == 1) {
29900 break
29901 }
29902 v.reset(OpAMD64MOVBstore)
29903 v.AddArg3(ptr, val, mem)
29904 return true
29905 }
29906 return false
29907 }
29908 func rewriteValueAMD64_OpTrunc(v *Value) bool {
29909 v_0 := v.Args[0]
29910
29911
29912 for {
29913 x := v_0
29914 v.reset(OpAMD64ROUNDSD)
29915 v.AuxInt = int8ToAuxInt(3)
29916 v.AddArg(x)
29917 return true
29918 }
29919 }
29920 func rewriteValueAMD64_OpZero(v *Value) bool {
29921 v_1 := v.Args[1]
29922 v_0 := v.Args[0]
29923 b := v.Block
29924 config := b.Func.Config
29925 typ := &b.Func.Config.Types
29926
29927
29928 for {
29929 if auxIntToInt64(v.AuxInt) != 0 {
29930 break
29931 }
29932 mem := v_1
29933 v.copyOf(mem)
29934 return true
29935 }
29936
29937
29938 for {
29939 if auxIntToInt64(v.AuxInt) != 1 {
29940 break
29941 }
29942 destptr := v_0
29943 mem := v_1
29944 v.reset(OpAMD64MOVBstoreconst)
29945 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29946 v.AddArg2(destptr, mem)
29947 return true
29948 }
29949
29950
29951 for {
29952 if auxIntToInt64(v.AuxInt) != 2 {
29953 break
29954 }
29955 destptr := v_0
29956 mem := v_1
29957 v.reset(OpAMD64MOVWstoreconst)
29958 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29959 v.AddArg2(destptr, mem)
29960 return true
29961 }
29962
29963
29964 for {
29965 if auxIntToInt64(v.AuxInt) != 4 {
29966 break
29967 }
29968 destptr := v_0
29969 mem := v_1
29970 v.reset(OpAMD64MOVLstoreconst)
29971 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29972 v.AddArg2(destptr, mem)
29973 return true
29974 }
29975
29976
29977 for {
29978 if auxIntToInt64(v.AuxInt) != 8 {
29979 break
29980 }
29981 destptr := v_0
29982 mem := v_1
29983 v.reset(OpAMD64MOVQstoreconst)
29984 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29985 v.AddArg2(destptr, mem)
29986 return true
29987 }
29988
29989
29990 for {
29991 if auxIntToInt64(v.AuxInt) != 3 {
29992 break
29993 }
29994 destptr := v_0
29995 mem := v_1
29996 v.reset(OpAMD64MOVBstoreconst)
29997 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
29998 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
29999 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30000 v0.AddArg2(destptr, mem)
30001 v.AddArg2(destptr, v0)
30002 return true
30003 }
30004
30005
30006 for {
30007 if auxIntToInt64(v.AuxInt) != 5 {
30008 break
30009 }
30010 destptr := v_0
30011 mem := v_1
30012 v.reset(OpAMD64MOVBstoreconst)
30013 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
30014 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
30015 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30016 v0.AddArg2(destptr, mem)
30017 v.AddArg2(destptr, v0)
30018 return true
30019 }
30020
30021
30022 for {
30023 if auxIntToInt64(v.AuxInt) != 6 {
30024 break
30025 }
30026 destptr := v_0
30027 mem := v_1
30028 v.reset(OpAMD64MOVWstoreconst)
30029 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
30030 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
30031 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30032 v0.AddArg2(destptr, mem)
30033 v.AddArg2(destptr, v0)
30034 return true
30035 }
30036
30037
30038 for {
30039 if auxIntToInt64(v.AuxInt) != 7 {
30040 break
30041 }
30042 destptr := v_0
30043 mem := v_1
30044 v.reset(OpAMD64MOVLstoreconst)
30045 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
30046 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
30047 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30048 v0.AddArg2(destptr, mem)
30049 v.AddArg2(destptr, v0)
30050 return true
30051 }
30052
30053
30054
30055 for {
30056 s := auxIntToInt64(v.AuxInt)
30057 destptr := v_0
30058 mem := v_1
30059 if !(s%8 != 0 && s > 8 && !config.useSSE) {
30060 break
30061 }
30062 v.reset(OpZero)
30063 v.AuxInt = int64ToAuxInt(s - s%8)
30064 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30065 v0.AuxInt = int64ToAuxInt(s % 8)
30066 v0.AddArg(destptr)
30067 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30068 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30069 v1.AddArg2(destptr, mem)
30070 v.AddArg2(v0, v1)
30071 return true
30072 }
30073
30074
30075
30076 for {
30077 if auxIntToInt64(v.AuxInt) != 16 {
30078 break
30079 }
30080 destptr := v_0
30081 mem := v_1
30082 if !(!config.useSSE) {
30083 break
30084 }
30085 v.reset(OpAMD64MOVQstoreconst)
30086 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30087 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30088 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30089 v0.AddArg2(destptr, mem)
30090 v.AddArg2(destptr, v0)
30091 return true
30092 }
30093
30094
30095
30096 for {
30097 if auxIntToInt64(v.AuxInt) != 24 {
30098 break
30099 }
30100 destptr := v_0
30101 mem := v_1
30102 if !(!config.useSSE) {
30103 break
30104 }
30105 v.reset(OpAMD64MOVQstoreconst)
30106 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30107 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30108 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30109 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30110 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30111 v1.AddArg2(destptr, mem)
30112 v0.AddArg2(destptr, v1)
30113 v.AddArg2(destptr, v0)
30114 return true
30115 }
30116
30117
30118
30119 for {
30120 if auxIntToInt64(v.AuxInt) != 32 {
30121 break
30122 }
30123 destptr := v_0
30124 mem := v_1
30125 if !(!config.useSSE) {
30126 break
30127 }
30128 v.reset(OpAMD64MOVQstoreconst)
30129 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
30130 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30131 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30132 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30133 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30134 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30135 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30136 v2.AddArg2(destptr, mem)
30137 v1.AddArg2(destptr, v2)
30138 v0.AddArg2(destptr, v1)
30139 v.AddArg2(destptr, v0)
30140 return true
30141 }
30142
30143
30144
30145 for {
30146 if auxIntToInt64(v.AuxInt) != 9 {
30147 break
30148 }
30149 destptr := v_0
30150 mem := v_1
30151 if !(config.useSSE) {
30152 break
30153 }
30154 v.reset(OpAMD64MOVBstoreconst)
30155 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30156 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30157 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30158 v0.AddArg2(destptr, mem)
30159 v.AddArg2(destptr, v0)
30160 return true
30161 }
30162
30163
30164
30165 for {
30166 if auxIntToInt64(v.AuxInt) != 10 {
30167 break
30168 }
30169 destptr := v_0
30170 mem := v_1
30171 if !(config.useSSE) {
30172 break
30173 }
30174 v.reset(OpAMD64MOVWstoreconst)
30175 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30176 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30177 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30178 v0.AddArg2(destptr, mem)
30179 v.AddArg2(destptr, v0)
30180 return true
30181 }
30182
30183
30184
30185 for {
30186 if auxIntToInt64(v.AuxInt) != 11 {
30187 break
30188 }
30189 destptr := v_0
30190 mem := v_1
30191 if !(config.useSSE) {
30192 break
30193 }
30194 v.reset(OpAMD64MOVLstoreconst)
30195 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
30196 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30197 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30198 v0.AddArg2(destptr, mem)
30199 v.AddArg2(destptr, v0)
30200 return true
30201 }
30202
30203
30204
30205 for {
30206 if auxIntToInt64(v.AuxInt) != 12 {
30207 break
30208 }
30209 destptr := v_0
30210 mem := v_1
30211 if !(config.useSSE) {
30212 break
30213 }
30214 v.reset(OpAMD64MOVLstoreconst)
30215 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30216 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30217 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30218 v0.AddArg2(destptr, mem)
30219 v.AddArg2(destptr, v0)
30220 return true
30221 }
30222
30223
30224
30225 for {
30226 s := auxIntToInt64(v.AuxInt)
30227 destptr := v_0
30228 mem := v_1
30229 if !(s > 12 && s < 16 && config.useSSE) {
30230 break
30231 }
30232 v.reset(OpAMD64MOVQstoreconst)
30233 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
30234 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30235 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30236 v0.AddArg2(destptr, mem)
30237 v.AddArg2(destptr, v0)
30238 return true
30239 }
30240
30241
30242
30243 for {
30244 s := auxIntToInt64(v.AuxInt)
30245 destptr := v_0
30246 mem := v_1
30247 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
30248 break
30249 }
30250 v.reset(OpZero)
30251 v.AuxInt = int64ToAuxInt(s - s%16)
30252 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30253 v0.AuxInt = int64ToAuxInt(s % 16)
30254 v0.AddArg(destptr)
30255 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30256 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30257 v1.AddArg2(destptr, mem)
30258 v.AddArg2(v0, v1)
30259 return true
30260 }
30261
30262
30263
30264 for {
30265 s := auxIntToInt64(v.AuxInt)
30266 destptr := v_0
30267 mem := v_1
30268 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
30269 break
30270 }
30271 v.reset(OpZero)
30272 v.AuxInt = int64ToAuxInt(s - s%16)
30273 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30274 v0.AuxInt = int64ToAuxInt(s % 16)
30275 v0.AddArg(destptr)
30276 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30277 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30278 v1.AddArg2(destptr, mem)
30279 v.AddArg2(v0, v1)
30280 return true
30281 }
30282
30283
30284
30285 for {
30286 if auxIntToInt64(v.AuxInt) != 16 {
30287 break
30288 }
30289 destptr := v_0
30290 mem := v_1
30291 if !(config.useSSE) {
30292 break
30293 }
30294 v.reset(OpAMD64MOVOstoreconst)
30295 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30296 v.AddArg2(destptr, mem)
30297 return true
30298 }
30299
30300
30301
30302 for {
30303 if auxIntToInt64(v.AuxInt) != 32 {
30304 break
30305 }
30306 destptr := v_0
30307 mem := v_1
30308 if !(config.useSSE) {
30309 break
30310 }
30311 v.reset(OpAMD64MOVOstoreconst)
30312 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30313 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30314 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30315 v0.AddArg2(destptr, mem)
30316 v.AddArg2(destptr, v0)
30317 return true
30318 }
30319
30320
30321
30322 for {
30323 if auxIntToInt64(v.AuxInt) != 48 {
30324 break
30325 }
30326 destptr := v_0
30327 mem := v_1
30328 if !(config.useSSE) {
30329 break
30330 }
30331 v.reset(OpAMD64MOVOstoreconst)
30332 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30333 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30334 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30335 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30336 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30337 v1.AddArg2(destptr, mem)
30338 v0.AddArg2(destptr, v1)
30339 v.AddArg2(destptr, v0)
30340 return true
30341 }
30342
30343
30344
30345 for {
30346 if auxIntToInt64(v.AuxInt) != 64 {
30347 break
30348 }
30349 destptr := v_0
30350 mem := v_1
30351 if !(config.useSSE) {
30352 break
30353 }
30354 v.reset(OpAMD64MOVOstoreconst)
30355 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
30356 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30357 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30358 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30359 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30360 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30361 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30362 v2.AddArg2(destptr, mem)
30363 v1.AddArg2(destptr, v2)
30364 v0.AddArg2(destptr, v1)
30365 v.AddArg2(destptr, v0)
30366 return true
30367 }
30368
30369
30370
30371 for {
30372 s := auxIntToInt64(v.AuxInt)
30373 destptr := v_0
30374 mem := v_1
30375 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
30376 break
30377 }
30378 v.reset(OpAMD64DUFFZERO)
30379 v.AuxInt = int64ToAuxInt(s)
30380 v.AddArg2(destptr, mem)
30381 return true
30382 }
30383
30384
30385
30386 for {
30387 s := auxIntToInt64(v.AuxInt)
30388 destptr := v_0
30389 mem := v_1
30390 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
30391 break
30392 }
30393 v.reset(OpAMD64REPSTOSQ)
30394 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30395 v0.AuxInt = int64ToAuxInt(s / 8)
30396 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30397 v1.AuxInt = int64ToAuxInt(0)
30398 v.AddArg4(destptr, v0, v1, mem)
30399 return true
30400 }
30401 return false
30402 }
30403 func rewriteBlockAMD64(b *Block) bool {
30404 typ := &b.Func.Config.Types
30405 switch b.Kind {
30406 case BlockAMD64EQ:
30407
30408
30409 for b.Controls[0].Op == OpAMD64TESTL {
30410 v_0 := b.Controls[0]
30411 _ = v_0.Args[1]
30412 v_0_0 := v_0.Args[0]
30413 v_0_1 := v_0.Args[1]
30414 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30415 if v_0_0.Op != OpAMD64SHLL {
30416 continue
30417 }
30418 x := v_0_0.Args[1]
30419 v_0_0_0 := v_0_0.Args[0]
30420 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
30421 continue
30422 }
30423 y := v_0_1
30424 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
30425 v0.AddArg2(x, y)
30426 b.resetWithControl(BlockAMD64UGE, v0)
30427 return true
30428 }
30429 break
30430 }
30431
30432
30433 for b.Controls[0].Op == OpAMD64TESTQ {
30434 v_0 := b.Controls[0]
30435 _ = v_0.Args[1]
30436 v_0_0 := v_0.Args[0]
30437 v_0_1 := v_0.Args[1]
30438 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30439 if v_0_0.Op != OpAMD64SHLQ {
30440 continue
30441 }
30442 x := v_0_0.Args[1]
30443 v_0_0_0 := v_0_0.Args[0]
30444 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
30445 continue
30446 }
30447 y := v_0_1
30448 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
30449 v0.AddArg2(x, y)
30450 b.resetWithControl(BlockAMD64UGE, v0)
30451 return true
30452 }
30453 break
30454 }
30455
30456
30457
30458 for b.Controls[0].Op == OpAMD64TESTLconst {
30459 v_0 := b.Controls[0]
30460 c := auxIntToInt32(v_0.AuxInt)
30461 x := v_0.Args[0]
30462 if !(isUint32PowerOfTwo(int64(c))) {
30463 break
30464 }
30465 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30466 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30467 v0.AddArg(x)
30468 b.resetWithControl(BlockAMD64UGE, v0)
30469 return true
30470 }
30471
30472
30473
30474 for b.Controls[0].Op == OpAMD64TESTQconst {
30475 v_0 := b.Controls[0]
30476 c := auxIntToInt32(v_0.AuxInt)
30477 x := v_0.Args[0]
30478 if !(isUint64PowerOfTwo(int64(c))) {
30479 break
30480 }
30481 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30482 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30483 v0.AddArg(x)
30484 b.resetWithControl(BlockAMD64UGE, v0)
30485 return true
30486 }
30487
30488
30489
30490 for b.Controls[0].Op == OpAMD64TESTQ {
30491 v_0 := b.Controls[0]
30492 _ = v_0.Args[1]
30493 v_0_0 := v_0.Args[0]
30494 v_0_1 := v_0.Args[1]
30495 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30496 if v_0_0.Op != OpAMD64MOVQconst {
30497 continue
30498 }
30499 c := auxIntToInt64(v_0_0.AuxInt)
30500 x := v_0_1
30501 if !(isUint64PowerOfTwo(c)) {
30502 continue
30503 }
30504 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30505 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
30506 v0.AddArg(x)
30507 b.resetWithControl(BlockAMD64UGE, v0)
30508 return true
30509 }
30510 break
30511 }
30512
30513
30514
30515 for b.Controls[0].Op == OpAMD64TESTQ {
30516 v_0 := b.Controls[0]
30517 _ = v_0.Args[1]
30518 v_0_0 := v_0.Args[0]
30519 v_0_1 := v_0.Args[1]
30520 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30521 z1 := v_0_0
30522 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
30523 continue
30524 }
30525 z1_0 := z1.Args[0]
30526 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30527 continue
30528 }
30529 x := z1_0.Args[0]
30530 z2 := v_0_1
30531 if !(z1 == z2) {
30532 continue
30533 }
30534 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30535 v0.AuxInt = int8ToAuxInt(63)
30536 v0.AddArg(x)
30537 b.resetWithControl(BlockAMD64UGE, v0)
30538 return true
30539 }
30540 break
30541 }
30542
30543
30544
30545 for b.Controls[0].Op == OpAMD64TESTL {
30546 v_0 := b.Controls[0]
30547 _ = v_0.Args[1]
30548 v_0_0 := v_0.Args[0]
30549 v_0_1 := v_0.Args[1]
30550 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30551 z1 := v_0_0
30552 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
30553 continue
30554 }
30555 z1_0 := z1.Args[0]
30556 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30557 continue
30558 }
30559 x := z1_0.Args[0]
30560 z2 := v_0_1
30561 if !(z1 == z2) {
30562 continue
30563 }
30564 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30565 v0.AuxInt = int8ToAuxInt(31)
30566 v0.AddArg(x)
30567 b.resetWithControl(BlockAMD64UGE, v0)
30568 return true
30569 }
30570 break
30571 }
30572
30573
30574
30575 for b.Controls[0].Op == OpAMD64TESTQ {
30576 v_0 := b.Controls[0]
30577 _ = v_0.Args[1]
30578 v_0_0 := v_0.Args[0]
30579 v_0_1 := v_0.Args[1]
30580 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30581 z1 := v_0_0
30582 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30583 continue
30584 }
30585 z1_0 := z1.Args[0]
30586 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30587 continue
30588 }
30589 x := z1_0.Args[0]
30590 z2 := v_0_1
30591 if !(z1 == z2) {
30592 continue
30593 }
30594 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30595 v0.AuxInt = int8ToAuxInt(0)
30596 v0.AddArg(x)
30597 b.resetWithControl(BlockAMD64UGE, v0)
30598 return true
30599 }
30600 break
30601 }
30602
30603
30604
30605 for b.Controls[0].Op == OpAMD64TESTL {
30606 v_0 := b.Controls[0]
30607 _ = v_0.Args[1]
30608 v_0_0 := v_0.Args[0]
30609 v_0_1 := v_0.Args[1]
30610 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30611 z1 := v_0_0
30612 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30613 continue
30614 }
30615 z1_0 := z1.Args[0]
30616 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30617 continue
30618 }
30619 x := z1_0.Args[0]
30620 z2 := v_0_1
30621 if !(z1 == z2) {
30622 continue
30623 }
30624 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30625 v0.AuxInt = int8ToAuxInt(0)
30626 v0.AddArg(x)
30627 b.resetWithControl(BlockAMD64UGE, v0)
30628 return true
30629 }
30630 break
30631 }
30632
30633
30634
30635 for b.Controls[0].Op == OpAMD64TESTQ {
30636 v_0 := b.Controls[0]
30637 _ = v_0.Args[1]
30638 v_0_0 := v_0.Args[0]
30639 v_0_1 := v_0.Args[1]
30640 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30641 z1 := v_0_0
30642 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30643 continue
30644 }
30645 x := z1.Args[0]
30646 z2 := v_0_1
30647 if !(z1 == z2) {
30648 continue
30649 }
30650 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30651 v0.AuxInt = int8ToAuxInt(63)
30652 v0.AddArg(x)
30653 b.resetWithControl(BlockAMD64UGE, v0)
30654 return true
30655 }
30656 break
30657 }
30658
30659
30660
30661 for b.Controls[0].Op == OpAMD64TESTL {
30662 v_0 := b.Controls[0]
30663 _ = v_0.Args[1]
30664 v_0_0 := v_0.Args[0]
30665 v_0_1 := v_0.Args[1]
30666 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30667 z1 := v_0_0
30668 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30669 continue
30670 }
30671 x := z1.Args[0]
30672 z2 := v_0_1
30673 if !(z1 == z2) {
30674 continue
30675 }
30676 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30677 v0.AuxInt = int8ToAuxInt(31)
30678 v0.AddArg(x)
30679 b.resetWithControl(BlockAMD64UGE, v0)
30680 return true
30681 }
30682 break
30683 }
30684
30685
30686 for b.Controls[0].Op == OpAMD64InvertFlags {
30687 v_0 := b.Controls[0]
30688 cmp := v_0.Args[0]
30689 b.resetWithControl(BlockAMD64EQ, cmp)
30690 return true
30691 }
30692
30693
30694 for b.Controls[0].Op == OpAMD64FlagEQ {
30695 b.Reset(BlockFirst)
30696 return true
30697 }
30698
30699
30700 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30701 b.Reset(BlockFirst)
30702 b.swapSuccessors()
30703 return true
30704 }
30705
30706
30707 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30708 b.Reset(BlockFirst)
30709 b.swapSuccessors()
30710 return true
30711 }
30712
30713
30714 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30715 b.Reset(BlockFirst)
30716 b.swapSuccessors()
30717 return true
30718 }
30719
30720
30721 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30722 b.Reset(BlockFirst)
30723 b.swapSuccessors()
30724 return true
30725 }
30726
30727
30728 for b.Controls[0].Op == OpAMD64TESTQ {
30729 v_0 := b.Controls[0]
30730 _ = v_0.Args[1]
30731 v_0_0 := v_0.Args[0]
30732 v_0_1 := v_0.Args[1]
30733 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30734 s := v_0_0
30735 if s.Op != OpSelect0 {
30736 continue
30737 }
30738 blsr := s.Args[0]
30739 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
30740 continue
30741 }
30742 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30743 v0.AddArg(blsr)
30744 b.resetWithControl(BlockAMD64EQ, v0)
30745 return true
30746 }
30747 break
30748 }
30749
30750
30751 for b.Controls[0].Op == OpAMD64TESTL {
30752 v_0 := b.Controls[0]
30753 _ = v_0.Args[1]
30754 v_0_0 := v_0.Args[0]
30755 v_0_1 := v_0.Args[1]
30756 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30757 s := v_0_0
30758 if s.Op != OpSelect0 {
30759 continue
30760 }
30761 blsr := s.Args[0]
30762 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
30763 continue
30764 }
30765 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30766 v0.AddArg(blsr)
30767 b.resetWithControl(BlockAMD64EQ, v0)
30768 return true
30769 }
30770 break
30771 }
30772 case BlockAMD64GE:
30773
30774
30775 for b.Controls[0].Op == OpAMD64InvertFlags {
30776 v_0 := b.Controls[0]
30777 cmp := v_0.Args[0]
30778 b.resetWithControl(BlockAMD64LE, cmp)
30779 return true
30780 }
30781
30782
30783 for b.Controls[0].Op == OpAMD64FlagEQ {
30784 b.Reset(BlockFirst)
30785 return true
30786 }
30787
30788
30789 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30790 b.Reset(BlockFirst)
30791 b.swapSuccessors()
30792 return true
30793 }
30794
30795
30796 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30797 b.Reset(BlockFirst)
30798 b.swapSuccessors()
30799 return true
30800 }
30801
30802
30803 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30804 b.Reset(BlockFirst)
30805 return true
30806 }
30807
30808
30809 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30810 b.Reset(BlockFirst)
30811 return true
30812 }
30813 case BlockAMD64GT:
30814
30815
30816 for b.Controls[0].Op == OpAMD64InvertFlags {
30817 v_0 := b.Controls[0]
30818 cmp := v_0.Args[0]
30819 b.resetWithControl(BlockAMD64LT, cmp)
30820 return true
30821 }
30822
30823
30824 for b.Controls[0].Op == OpAMD64FlagEQ {
30825 b.Reset(BlockFirst)
30826 b.swapSuccessors()
30827 return true
30828 }
30829
30830
30831 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30832 b.Reset(BlockFirst)
30833 b.swapSuccessors()
30834 return true
30835 }
30836
30837
30838 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30839 b.Reset(BlockFirst)
30840 b.swapSuccessors()
30841 return true
30842 }
30843
30844
30845 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30846 b.Reset(BlockFirst)
30847 return true
30848 }
30849
30850
30851 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30852 b.Reset(BlockFirst)
30853 return true
30854 }
30855 case BlockIf:
30856
30857
30858 for b.Controls[0].Op == OpAMD64SETL {
30859 v_0 := b.Controls[0]
30860 cmp := v_0.Args[0]
30861 b.resetWithControl(BlockAMD64LT, cmp)
30862 return true
30863 }
30864
30865
30866 for b.Controls[0].Op == OpAMD64SETLE {
30867 v_0 := b.Controls[0]
30868 cmp := v_0.Args[0]
30869 b.resetWithControl(BlockAMD64LE, cmp)
30870 return true
30871 }
30872
30873
30874 for b.Controls[0].Op == OpAMD64SETG {
30875 v_0 := b.Controls[0]
30876 cmp := v_0.Args[0]
30877 b.resetWithControl(BlockAMD64GT, cmp)
30878 return true
30879 }
30880
30881
30882 for b.Controls[0].Op == OpAMD64SETGE {
30883 v_0 := b.Controls[0]
30884 cmp := v_0.Args[0]
30885 b.resetWithControl(BlockAMD64GE, cmp)
30886 return true
30887 }
30888
30889
30890 for b.Controls[0].Op == OpAMD64SETEQ {
30891 v_0 := b.Controls[0]
30892 cmp := v_0.Args[0]
30893 b.resetWithControl(BlockAMD64EQ, cmp)
30894 return true
30895 }
30896
30897
30898 for b.Controls[0].Op == OpAMD64SETNE {
30899 v_0 := b.Controls[0]
30900 cmp := v_0.Args[0]
30901 b.resetWithControl(BlockAMD64NE, cmp)
30902 return true
30903 }
30904
30905
30906 for b.Controls[0].Op == OpAMD64SETB {
30907 v_0 := b.Controls[0]
30908 cmp := v_0.Args[0]
30909 b.resetWithControl(BlockAMD64ULT, cmp)
30910 return true
30911 }
30912
30913
30914 for b.Controls[0].Op == OpAMD64SETBE {
30915 v_0 := b.Controls[0]
30916 cmp := v_0.Args[0]
30917 b.resetWithControl(BlockAMD64ULE, cmp)
30918 return true
30919 }
30920
30921
30922 for b.Controls[0].Op == OpAMD64SETA {
30923 v_0 := b.Controls[0]
30924 cmp := v_0.Args[0]
30925 b.resetWithControl(BlockAMD64UGT, cmp)
30926 return true
30927 }
30928
30929
30930 for b.Controls[0].Op == OpAMD64SETAE {
30931 v_0 := b.Controls[0]
30932 cmp := v_0.Args[0]
30933 b.resetWithControl(BlockAMD64UGE, cmp)
30934 return true
30935 }
30936
30937
30938 for b.Controls[0].Op == OpAMD64SETO {
30939 v_0 := b.Controls[0]
30940 cmp := v_0.Args[0]
30941 b.resetWithControl(BlockAMD64OS, cmp)
30942 return true
30943 }
30944
30945
30946 for b.Controls[0].Op == OpAMD64SETGF {
30947 v_0 := b.Controls[0]
30948 cmp := v_0.Args[0]
30949 b.resetWithControl(BlockAMD64UGT, cmp)
30950 return true
30951 }
30952
30953
30954 for b.Controls[0].Op == OpAMD64SETGEF {
30955 v_0 := b.Controls[0]
30956 cmp := v_0.Args[0]
30957 b.resetWithControl(BlockAMD64UGE, cmp)
30958 return true
30959 }
30960
30961
30962 for b.Controls[0].Op == OpAMD64SETEQF {
30963 v_0 := b.Controls[0]
30964 cmp := v_0.Args[0]
30965 b.resetWithControl(BlockAMD64EQF, cmp)
30966 return true
30967 }
30968
30969
30970 for b.Controls[0].Op == OpAMD64SETNEF {
30971 v_0 := b.Controls[0]
30972 cmp := v_0.Args[0]
30973 b.resetWithControl(BlockAMD64NEF, cmp)
30974 return true
30975 }
30976
30977
30978 for {
30979 cond := b.Controls[0]
30980 v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
30981 v0.AddArg2(cond, cond)
30982 b.resetWithControl(BlockAMD64NE, v0)
30983 return true
30984 }
30985 case BlockJumpTable:
30986
30987
30988 for {
30989 idx := b.Controls[0]
30990 v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
30991 v0.Aux = symToAux(makeJumpTableSym(b))
30992 v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
30993 v0.AddArg(v1)
30994 b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
30995 b.Aux = symToAux(makeJumpTableSym(b))
30996 return true
30997 }
30998 case BlockAMD64LE:
30999
31000
31001 for b.Controls[0].Op == OpAMD64InvertFlags {
31002 v_0 := b.Controls[0]
31003 cmp := v_0.Args[0]
31004 b.resetWithControl(BlockAMD64GE, cmp)
31005 return true
31006 }
31007
31008
31009 for b.Controls[0].Op == OpAMD64FlagEQ {
31010 b.Reset(BlockFirst)
31011 return true
31012 }
31013
31014
31015 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31016 b.Reset(BlockFirst)
31017 return true
31018 }
31019
31020
31021 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31022 b.Reset(BlockFirst)
31023 return true
31024 }
31025
31026
31027 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31028 b.Reset(BlockFirst)
31029 b.swapSuccessors()
31030 return true
31031 }
31032
31033
31034 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31035 b.Reset(BlockFirst)
31036 b.swapSuccessors()
31037 return true
31038 }
31039 case BlockAMD64LT:
31040
31041
31042 for b.Controls[0].Op == OpAMD64InvertFlags {
31043 v_0 := b.Controls[0]
31044 cmp := v_0.Args[0]
31045 b.resetWithControl(BlockAMD64GT, cmp)
31046 return true
31047 }
31048
31049
31050 for b.Controls[0].Op == OpAMD64FlagEQ {
31051 b.Reset(BlockFirst)
31052 b.swapSuccessors()
31053 return true
31054 }
31055
31056
31057 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31058 b.Reset(BlockFirst)
31059 return true
31060 }
31061
31062
31063 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31064 b.Reset(BlockFirst)
31065 return true
31066 }
31067
31068
31069 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31070 b.Reset(BlockFirst)
31071 b.swapSuccessors()
31072 return true
31073 }
31074
31075
31076 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31077 b.Reset(BlockFirst)
31078 b.swapSuccessors()
31079 return true
31080 }
31081 case BlockAMD64NE:
31082
31083
31084 for b.Controls[0].Op == OpAMD64TESTB {
31085 v_0 := b.Controls[0]
31086 _ = v_0.Args[1]
31087 v_0_0 := v_0.Args[0]
31088 if v_0_0.Op != OpAMD64SETL {
31089 break
31090 }
31091 cmp := v_0_0.Args[0]
31092 v_0_1 := v_0.Args[1]
31093 if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
31094 break
31095 }
31096 b.resetWithControl(BlockAMD64LT, cmp)
31097 return true
31098 }
31099
31100
31101 for b.Controls[0].Op == OpAMD64TESTB {
31102 v_0 := b.Controls[0]
31103 _ = v_0.Args[1]
31104 v_0_0 := v_0.Args[0]
31105 if v_0_0.Op != OpAMD64SETLE {
31106 break
31107 }
31108 cmp := v_0_0.Args[0]
31109 v_0_1 := v_0.Args[1]
31110 if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
31111 break
31112 }
31113 b.resetWithControl(BlockAMD64LE, cmp)
31114 return true
31115 }
31116
31117
31118 for b.Controls[0].Op == OpAMD64TESTB {
31119 v_0 := b.Controls[0]
31120 _ = v_0.Args[1]
31121 v_0_0 := v_0.Args[0]
31122 if v_0_0.Op != OpAMD64SETG {
31123 break
31124 }
31125 cmp := v_0_0.Args[0]
31126 v_0_1 := v_0.Args[1]
31127 if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
31128 break
31129 }
31130 b.resetWithControl(BlockAMD64GT, cmp)
31131 return true
31132 }
31133
31134
31135 for b.Controls[0].Op == OpAMD64TESTB {
31136 v_0 := b.Controls[0]
31137 _ = v_0.Args[1]
31138 v_0_0 := v_0.Args[0]
31139 if v_0_0.Op != OpAMD64SETGE {
31140 break
31141 }
31142 cmp := v_0_0.Args[0]
31143 v_0_1 := v_0.Args[1]
31144 if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
31145 break
31146 }
31147 b.resetWithControl(BlockAMD64GE, cmp)
31148 return true
31149 }
31150
31151
31152 for b.Controls[0].Op == OpAMD64TESTB {
31153 v_0 := b.Controls[0]
31154 _ = v_0.Args[1]
31155 v_0_0 := v_0.Args[0]
31156 if v_0_0.Op != OpAMD64SETEQ {
31157 break
31158 }
31159 cmp := v_0_0.Args[0]
31160 v_0_1 := v_0.Args[1]
31161 if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
31162 break
31163 }
31164 b.resetWithControl(BlockAMD64EQ, cmp)
31165 return true
31166 }
31167
31168
31169 for b.Controls[0].Op == OpAMD64TESTB {
31170 v_0 := b.Controls[0]
31171 _ = v_0.Args[1]
31172 v_0_0 := v_0.Args[0]
31173 if v_0_0.Op != OpAMD64SETNE {
31174 break
31175 }
31176 cmp := v_0_0.Args[0]
31177 v_0_1 := v_0.Args[1]
31178 if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
31179 break
31180 }
31181 b.resetWithControl(BlockAMD64NE, cmp)
31182 return true
31183 }
31184
31185
31186 for b.Controls[0].Op == OpAMD64TESTB {
31187 v_0 := b.Controls[0]
31188 _ = v_0.Args[1]
31189 v_0_0 := v_0.Args[0]
31190 if v_0_0.Op != OpAMD64SETB {
31191 break
31192 }
31193 cmp := v_0_0.Args[0]
31194 v_0_1 := v_0.Args[1]
31195 if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
31196 break
31197 }
31198 b.resetWithControl(BlockAMD64ULT, cmp)
31199 return true
31200 }
31201
31202
31203 for b.Controls[0].Op == OpAMD64TESTB {
31204 v_0 := b.Controls[0]
31205 _ = v_0.Args[1]
31206 v_0_0 := v_0.Args[0]
31207 if v_0_0.Op != OpAMD64SETBE {
31208 break
31209 }
31210 cmp := v_0_0.Args[0]
31211 v_0_1 := v_0.Args[1]
31212 if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
31213 break
31214 }
31215 b.resetWithControl(BlockAMD64ULE, cmp)
31216 return true
31217 }
31218
31219
31220 for b.Controls[0].Op == OpAMD64TESTB {
31221 v_0 := b.Controls[0]
31222 _ = v_0.Args[1]
31223 v_0_0 := v_0.Args[0]
31224 if v_0_0.Op != OpAMD64SETA {
31225 break
31226 }
31227 cmp := v_0_0.Args[0]
31228 v_0_1 := v_0.Args[1]
31229 if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
31230 break
31231 }
31232 b.resetWithControl(BlockAMD64UGT, cmp)
31233 return true
31234 }
31235
31236
31237 for b.Controls[0].Op == OpAMD64TESTB {
31238 v_0 := b.Controls[0]
31239 _ = v_0.Args[1]
31240 v_0_0 := v_0.Args[0]
31241 if v_0_0.Op != OpAMD64SETAE {
31242 break
31243 }
31244 cmp := v_0_0.Args[0]
31245 v_0_1 := v_0.Args[1]
31246 if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
31247 break
31248 }
31249 b.resetWithControl(BlockAMD64UGE, cmp)
31250 return true
31251 }
31252
31253
31254 for b.Controls[0].Op == OpAMD64TESTB {
31255 v_0 := b.Controls[0]
31256 _ = v_0.Args[1]
31257 v_0_0 := v_0.Args[0]
31258 if v_0_0.Op != OpAMD64SETO {
31259 break
31260 }
31261 cmp := v_0_0.Args[0]
31262 v_0_1 := v_0.Args[1]
31263 if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
31264 break
31265 }
31266 b.resetWithControl(BlockAMD64OS, cmp)
31267 return true
31268 }
31269
31270
31271 for b.Controls[0].Op == OpAMD64TESTL {
31272 v_0 := b.Controls[0]
31273 _ = v_0.Args[1]
31274 v_0_0 := v_0.Args[0]
31275 v_0_1 := v_0.Args[1]
31276 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31277 if v_0_0.Op != OpAMD64SHLL {
31278 continue
31279 }
31280 x := v_0_0.Args[1]
31281 v_0_0_0 := v_0_0.Args[0]
31282 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
31283 continue
31284 }
31285 y := v_0_1
31286 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
31287 v0.AddArg2(x, y)
31288 b.resetWithControl(BlockAMD64ULT, v0)
31289 return true
31290 }
31291 break
31292 }
31293
31294
31295 for b.Controls[0].Op == OpAMD64TESTQ {
31296 v_0 := b.Controls[0]
31297 _ = v_0.Args[1]
31298 v_0_0 := v_0.Args[0]
31299 v_0_1 := v_0.Args[1]
31300 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31301 if v_0_0.Op != OpAMD64SHLQ {
31302 continue
31303 }
31304 x := v_0_0.Args[1]
31305 v_0_0_0 := v_0_0.Args[0]
31306 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
31307 continue
31308 }
31309 y := v_0_1
31310 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
31311 v0.AddArg2(x, y)
31312 b.resetWithControl(BlockAMD64ULT, v0)
31313 return true
31314 }
31315 break
31316 }
31317
31318
31319
31320 for b.Controls[0].Op == OpAMD64TESTLconst {
31321 v_0 := b.Controls[0]
31322 c := auxIntToInt32(v_0.AuxInt)
31323 x := v_0.Args[0]
31324 if !(isUint32PowerOfTwo(int64(c))) {
31325 break
31326 }
31327 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31328 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31329 v0.AddArg(x)
31330 b.resetWithControl(BlockAMD64ULT, v0)
31331 return true
31332 }
31333
31334
31335
31336 for b.Controls[0].Op == OpAMD64TESTQconst {
31337 v_0 := b.Controls[0]
31338 c := auxIntToInt32(v_0.AuxInt)
31339 x := v_0.Args[0]
31340 if !(isUint64PowerOfTwo(int64(c))) {
31341 break
31342 }
31343 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31344 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31345 v0.AddArg(x)
31346 b.resetWithControl(BlockAMD64ULT, v0)
31347 return true
31348 }
31349
31350
31351
31352 for b.Controls[0].Op == OpAMD64TESTQ {
31353 v_0 := b.Controls[0]
31354 _ = v_0.Args[1]
31355 v_0_0 := v_0.Args[0]
31356 v_0_1 := v_0.Args[1]
31357 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31358 if v_0_0.Op != OpAMD64MOVQconst {
31359 continue
31360 }
31361 c := auxIntToInt64(v_0_0.AuxInt)
31362 x := v_0_1
31363 if !(isUint64PowerOfTwo(c)) {
31364 continue
31365 }
31366 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31367 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
31368 v0.AddArg(x)
31369 b.resetWithControl(BlockAMD64ULT, v0)
31370 return true
31371 }
31372 break
31373 }
31374
31375
31376
31377 for b.Controls[0].Op == OpAMD64TESTQ {
31378 v_0 := b.Controls[0]
31379 _ = v_0.Args[1]
31380 v_0_0 := v_0.Args[0]
31381 v_0_1 := v_0.Args[1]
31382 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31383 z1 := v_0_0
31384 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
31385 continue
31386 }
31387 z1_0 := z1.Args[0]
31388 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31389 continue
31390 }
31391 x := z1_0.Args[0]
31392 z2 := v_0_1
31393 if !(z1 == z2) {
31394 continue
31395 }
31396 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31397 v0.AuxInt = int8ToAuxInt(63)
31398 v0.AddArg(x)
31399 b.resetWithControl(BlockAMD64ULT, v0)
31400 return true
31401 }
31402 break
31403 }
31404
31405
31406
31407 for b.Controls[0].Op == OpAMD64TESTL {
31408 v_0 := b.Controls[0]
31409 _ = v_0.Args[1]
31410 v_0_0 := v_0.Args[0]
31411 v_0_1 := v_0.Args[1]
31412 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31413 z1 := v_0_0
31414 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
31415 continue
31416 }
31417 z1_0 := z1.Args[0]
31418 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31419 continue
31420 }
31421 x := z1_0.Args[0]
31422 z2 := v_0_1
31423 if !(z1 == z2) {
31424 continue
31425 }
31426 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31427 v0.AuxInt = int8ToAuxInt(31)
31428 v0.AddArg(x)
31429 b.resetWithControl(BlockAMD64ULT, v0)
31430 return true
31431 }
31432 break
31433 }
31434
31435
31436
31437 for b.Controls[0].Op == OpAMD64TESTQ {
31438 v_0 := b.Controls[0]
31439 _ = v_0.Args[1]
31440 v_0_0 := v_0.Args[0]
31441 v_0_1 := v_0.Args[1]
31442 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31443 z1 := v_0_0
31444 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31445 continue
31446 }
31447 z1_0 := z1.Args[0]
31448 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31449 continue
31450 }
31451 x := z1_0.Args[0]
31452 z2 := v_0_1
31453 if !(z1 == z2) {
31454 continue
31455 }
31456 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31457 v0.AuxInt = int8ToAuxInt(0)
31458 v0.AddArg(x)
31459 b.resetWithControl(BlockAMD64ULT, v0)
31460 return true
31461 }
31462 break
31463 }
31464
31465
31466
31467 for b.Controls[0].Op == OpAMD64TESTL {
31468 v_0 := b.Controls[0]
31469 _ = v_0.Args[1]
31470 v_0_0 := v_0.Args[0]
31471 v_0_1 := v_0.Args[1]
31472 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31473 z1 := v_0_0
31474 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31475 continue
31476 }
31477 z1_0 := z1.Args[0]
31478 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31479 continue
31480 }
31481 x := z1_0.Args[0]
31482 z2 := v_0_1
31483 if !(z1 == z2) {
31484 continue
31485 }
31486 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31487 v0.AuxInt = int8ToAuxInt(0)
31488 v0.AddArg(x)
31489 b.resetWithControl(BlockAMD64ULT, v0)
31490 return true
31491 }
31492 break
31493 }
31494
31495
31496
31497 for b.Controls[0].Op == OpAMD64TESTQ {
31498 v_0 := b.Controls[0]
31499 _ = v_0.Args[1]
31500 v_0_0 := v_0.Args[0]
31501 v_0_1 := v_0.Args[1]
31502 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31503 z1 := v_0_0
31504 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31505 continue
31506 }
31507 x := z1.Args[0]
31508 z2 := v_0_1
31509 if !(z1 == z2) {
31510 continue
31511 }
31512 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31513 v0.AuxInt = int8ToAuxInt(63)
31514 v0.AddArg(x)
31515 b.resetWithControl(BlockAMD64ULT, v0)
31516 return true
31517 }
31518 break
31519 }
31520
31521
31522
31523 for b.Controls[0].Op == OpAMD64TESTL {
31524 v_0 := b.Controls[0]
31525 _ = v_0.Args[1]
31526 v_0_0 := v_0.Args[0]
31527 v_0_1 := v_0.Args[1]
31528 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31529 z1 := v_0_0
31530 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31531 continue
31532 }
31533 x := z1.Args[0]
31534 z2 := v_0_1
31535 if !(z1 == z2) {
31536 continue
31537 }
31538 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31539 v0.AuxInt = int8ToAuxInt(31)
31540 v0.AddArg(x)
31541 b.resetWithControl(BlockAMD64ULT, v0)
31542 return true
31543 }
31544 break
31545 }
31546
31547
31548 for b.Controls[0].Op == OpAMD64TESTB {
31549 v_0 := b.Controls[0]
31550 _ = v_0.Args[1]
31551 v_0_0 := v_0.Args[0]
31552 if v_0_0.Op != OpAMD64SETGF {
31553 break
31554 }
31555 cmp := v_0_0.Args[0]
31556 v_0_1 := v_0.Args[1]
31557 if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
31558 break
31559 }
31560 b.resetWithControl(BlockAMD64UGT, cmp)
31561 return true
31562 }
31563
31564
31565 for b.Controls[0].Op == OpAMD64TESTB {
31566 v_0 := b.Controls[0]
31567 _ = v_0.Args[1]
31568 v_0_0 := v_0.Args[0]
31569 if v_0_0.Op != OpAMD64SETGEF {
31570 break
31571 }
31572 cmp := v_0_0.Args[0]
31573 v_0_1 := v_0.Args[1]
31574 if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
31575 break
31576 }
31577 b.resetWithControl(BlockAMD64UGE, cmp)
31578 return true
31579 }
31580
31581
31582 for b.Controls[0].Op == OpAMD64TESTB {
31583 v_0 := b.Controls[0]
31584 _ = v_0.Args[1]
31585 v_0_0 := v_0.Args[0]
31586 if v_0_0.Op != OpAMD64SETEQF {
31587 break
31588 }
31589 cmp := v_0_0.Args[0]
31590 v_0_1 := v_0.Args[1]
31591 if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
31592 break
31593 }
31594 b.resetWithControl(BlockAMD64EQF, cmp)
31595 return true
31596 }
31597
31598
31599 for b.Controls[0].Op == OpAMD64TESTB {
31600 v_0 := b.Controls[0]
31601 _ = v_0.Args[1]
31602 v_0_0 := v_0.Args[0]
31603 if v_0_0.Op != OpAMD64SETNEF {
31604 break
31605 }
31606 cmp := v_0_0.Args[0]
31607 v_0_1 := v_0.Args[1]
31608 if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
31609 break
31610 }
31611 b.resetWithControl(BlockAMD64NEF, cmp)
31612 return true
31613 }
31614
31615
31616 for b.Controls[0].Op == OpAMD64InvertFlags {
31617 v_0 := b.Controls[0]
31618 cmp := v_0.Args[0]
31619 b.resetWithControl(BlockAMD64NE, cmp)
31620 return true
31621 }
31622
31623
31624 for b.Controls[0].Op == OpAMD64FlagEQ {
31625 b.Reset(BlockFirst)
31626 b.swapSuccessors()
31627 return true
31628 }
31629
31630
31631 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31632 b.Reset(BlockFirst)
31633 return true
31634 }
31635
31636
31637 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31638 b.Reset(BlockFirst)
31639 return true
31640 }
31641
31642
31643 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31644 b.Reset(BlockFirst)
31645 return true
31646 }
31647
31648
31649 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31650 b.Reset(BlockFirst)
31651 return true
31652 }
31653
31654
31655 for b.Controls[0].Op == OpAMD64TESTQ {
31656 v_0 := b.Controls[0]
31657 _ = v_0.Args[1]
31658 v_0_0 := v_0.Args[0]
31659 v_0_1 := v_0.Args[1]
31660 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31661 s := v_0_0
31662 if s.Op != OpSelect0 {
31663 continue
31664 }
31665 blsr := s.Args[0]
31666 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
31667 continue
31668 }
31669 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31670 v0.AddArg(blsr)
31671 b.resetWithControl(BlockAMD64NE, v0)
31672 return true
31673 }
31674 break
31675 }
31676
31677
31678 for b.Controls[0].Op == OpAMD64TESTL {
31679 v_0 := b.Controls[0]
31680 _ = v_0.Args[1]
31681 v_0_0 := v_0.Args[0]
31682 v_0_1 := v_0.Args[1]
31683 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31684 s := v_0_0
31685 if s.Op != OpSelect0 {
31686 continue
31687 }
31688 blsr := s.Args[0]
31689 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
31690 continue
31691 }
31692 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31693 v0.AddArg(blsr)
31694 b.resetWithControl(BlockAMD64NE, v0)
31695 return true
31696 }
31697 break
31698 }
31699 case BlockAMD64UGE:
31700
31701
31702 for b.Controls[0].Op == OpAMD64TESTQ {
31703 v_0 := b.Controls[0]
31704 x := v_0.Args[1]
31705 if x != v_0.Args[0] {
31706 break
31707 }
31708 b.Reset(BlockFirst)
31709 return true
31710 }
31711
31712
31713 for b.Controls[0].Op == OpAMD64TESTL {
31714 v_0 := b.Controls[0]
31715 x := v_0.Args[1]
31716 if x != v_0.Args[0] {
31717 break
31718 }
31719 b.Reset(BlockFirst)
31720 return true
31721 }
31722
31723
31724 for b.Controls[0].Op == OpAMD64TESTW {
31725 v_0 := b.Controls[0]
31726 x := v_0.Args[1]
31727 if x != v_0.Args[0] {
31728 break
31729 }
31730 b.Reset(BlockFirst)
31731 return true
31732 }
31733
31734
31735 for b.Controls[0].Op == OpAMD64TESTB {
31736 v_0 := b.Controls[0]
31737 x := v_0.Args[1]
31738 if x != v_0.Args[0] {
31739 break
31740 }
31741 b.Reset(BlockFirst)
31742 return true
31743 }
31744
31745
31746 for b.Controls[0].Op == OpAMD64InvertFlags {
31747 v_0 := b.Controls[0]
31748 cmp := v_0.Args[0]
31749 b.resetWithControl(BlockAMD64ULE, cmp)
31750 return true
31751 }
31752
31753
31754 for b.Controls[0].Op == OpAMD64FlagEQ {
31755 b.Reset(BlockFirst)
31756 return true
31757 }
31758
31759
31760 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31761 b.Reset(BlockFirst)
31762 b.swapSuccessors()
31763 return true
31764 }
31765
31766
31767 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31768 b.Reset(BlockFirst)
31769 return true
31770 }
31771
31772
31773 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31774 b.Reset(BlockFirst)
31775 b.swapSuccessors()
31776 return true
31777 }
31778
31779
31780 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31781 b.Reset(BlockFirst)
31782 return true
31783 }
31784 case BlockAMD64UGT:
31785
31786
31787 for b.Controls[0].Op == OpAMD64InvertFlags {
31788 v_0 := b.Controls[0]
31789 cmp := v_0.Args[0]
31790 b.resetWithControl(BlockAMD64ULT, cmp)
31791 return true
31792 }
31793
31794
31795 for b.Controls[0].Op == OpAMD64FlagEQ {
31796 b.Reset(BlockFirst)
31797 b.swapSuccessors()
31798 return true
31799 }
31800
31801
31802 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31803 b.Reset(BlockFirst)
31804 b.swapSuccessors()
31805 return true
31806 }
31807
31808
31809 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31810 b.Reset(BlockFirst)
31811 return true
31812 }
31813
31814
31815 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31816 b.Reset(BlockFirst)
31817 b.swapSuccessors()
31818 return true
31819 }
31820
31821
31822 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31823 b.Reset(BlockFirst)
31824 return true
31825 }
31826 case BlockAMD64ULE:
31827
31828
31829 for b.Controls[0].Op == OpAMD64InvertFlags {
31830 v_0 := b.Controls[0]
31831 cmp := v_0.Args[0]
31832 b.resetWithControl(BlockAMD64UGE, cmp)
31833 return true
31834 }
31835
31836
31837 for b.Controls[0].Op == OpAMD64FlagEQ {
31838 b.Reset(BlockFirst)
31839 return true
31840 }
31841
31842
31843 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31844 b.Reset(BlockFirst)
31845 return true
31846 }
31847
31848
31849 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31850 b.Reset(BlockFirst)
31851 b.swapSuccessors()
31852 return true
31853 }
31854
31855
31856 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31857 b.Reset(BlockFirst)
31858 return true
31859 }
31860
31861
31862 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31863 b.Reset(BlockFirst)
31864 b.swapSuccessors()
31865 return true
31866 }
31867 case BlockAMD64ULT:
31868
31869
31870 for b.Controls[0].Op == OpAMD64TESTQ {
31871 v_0 := b.Controls[0]
31872 x := v_0.Args[1]
31873 if x != v_0.Args[0] {
31874 break
31875 }
31876 b.Reset(BlockFirst)
31877 b.swapSuccessors()
31878 return true
31879 }
31880
31881
31882 for b.Controls[0].Op == OpAMD64TESTL {
31883 v_0 := b.Controls[0]
31884 x := v_0.Args[1]
31885 if x != v_0.Args[0] {
31886 break
31887 }
31888 b.Reset(BlockFirst)
31889 b.swapSuccessors()
31890 return true
31891 }
31892
31893
31894 for b.Controls[0].Op == OpAMD64TESTW {
31895 v_0 := b.Controls[0]
31896 x := v_0.Args[1]
31897 if x != v_0.Args[0] {
31898 break
31899 }
31900 b.Reset(BlockFirst)
31901 b.swapSuccessors()
31902 return true
31903 }
31904
31905
31906 for b.Controls[0].Op == OpAMD64TESTB {
31907 v_0 := b.Controls[0]
31908 x := v_0.Args[1]
31909 if x != v_0.Args[0] {
31910 break
31911 }
31912 b.Reset(BlockFirst)
31913 b.swapSuccessors()
31914 return true
31915 }
31916
31917
31918 for b.Controls[0].Op == OpAMD64InvertFlags {
31919 v_0 := b.Controls[0]
31920 cmp := v_0.Args[0]
31921 b.resetWithControl(BlockAMD64UGT, cmp)
31922 return true
31923 }
31924
31925
31926 for b.Controls[0].Op == OpAMD64FlagEQ {
31927 b.Reset(BlockFirst)
31928 b.swapSuccessors()
31929 return true
31930 }
31931
31932
31933 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31934 b.Reset(BlockFirst)
31935 return true
31936 }
31937
31938
31939 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31940 b.Reset(BlockFirst)
31941 b.swapSuccessors()
31942 return true
31943 }
31944
31945
31946 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31947 b.Reset(BlockFirst)
31948 return true
31949 }
31950
31951
31952 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31953 b.Reset(BlockFirst)
31954 b.swapSuccessors()
31955 return true
31956 }
31957 }
31958 return false
31959 }
31960
View as plain text