// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT.

package ssa

import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap16:
		return rewriteValueAMD64_OpBswap16(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMax32F:
		return rewriteValueAMD64_OpMax32F(v)
	case OpMax64F:
		return rewriteValueAMD64_OpMax64F(v)
	case OpMin32F:
		return rewriteValueAMD64_OpMin32F(v)
	case OpMin64F:
		return rewriteValueAMD64_OpMin64F(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst <t> [c]))
	// cond: is32Bit(c) && !t.IsPtr()
1801	// result: (ADDQconst [int32(c)] x)
1802	for {
1803		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1804			x := v_0
1805			if v_1.Op != OpAMD64MOVQconst {
1806				continue
1807			}
1808			t := v_1.Type
1809			c := auxIntToInt64(v_1.AuxInt)
1810			if !(is32Bit(c) && !t.IsPtr()) {
1811				continue
1812			}
1813			v.reset(OpAMD64ADDQconst)
1814			v.AuxInt = int32ToAuxInt(int32(c))
1815			v.AddArg(x)
1816			return true
1817		}
1818		break
1819	}
1820	// match: (ADDQ x (MOVLconst [c]))
1821	// result: (ADDQconst [c] x)
1822	for {
1823		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1824			x := v_0
1825			if v_1.Op != OpAMD64MOVLconst {
1826				continue
1827			}
1828			c := auxIntToInt32(v_1.AuxInt)
1829			v.reset(OpAMD64ADDQconst)
1830			v.AuxInt = int32ToAuxInt(c)
1831			v.AddArg(x)
1832			return true
1833		}
1834		break
1835	}
1836	// match: (ADDQ x (SHLQconst [3] y))
1837	// result: (LEAQ8 x y)
1838	for {
1839		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1840			x := v_0
1841			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
1842				continue
1843			}
1844			y := v_1.Args[0]
1845			v.reset(OpAMD64LEAQ8)
1846			v.AddArg2(x, y)
1847			return true
1848		}
1849		break
1850	}
1851	// match: (ADDQ x (SHLQconst [2] y))
1852	// result: (LEAQ4 x y)
1853	for {
1854		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1855			x := v_0
1856			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
1857				continue
1858			}
1859			y := v_1.Args[0]
1860			v.reset(OpAMD64LEAQ4)
1861			v.AddArg2(x, y)
1862			return true
1863		}
1864		break
1865	}
1866	// match: (ADDQ x (SHLQconst [1] y))
1867	// result: (LEAQ2 x y)
1868	for {
1869		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1870			x := v_0
1871			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
1872				continue
1873			}
1874			y := v_1.Args[0]
1875			v.reset(OpAMD64LEAQ2)
1876			v.AddArg2(x, y)
1877			return true
1878		}
1879		break
1880	}
1881	// match: (ADDQ x (ADDQ y y))
1882	// result: (LEAQ2 x y)
1883	for {
1884		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1885			x := v_0
1886			if v_1.Op != OpAMD64ADDQ {
1887				continue
1888			}
1889			y := v_1.Args[1]
1890			if y != v_1.Args[0] {
1891				continue
1892			}
1893			v.reset(OpAMD64LEAQ2)
1894			v.AddArg2(x, y)
1895			return true
1896		}
1897		break
1898	}
1899	// match: (ADDQ x (ADDQ x y))
1900	// result: (LEAQ2 y x)
1901	for {
1902		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1903			x := v_0
1904			if v_1.Op != OpAMD64ADDQ {
1905				continue
1906			}
1907			_ = v_1.Args[1]
1908			v_1_0 := v_1.Args[0]
1909			v_1_1 := v_1.Args[1]
1910			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1911				if x != v_1_0 {
1912					continue
1913				}
1914				y := v_1_1
1915				v.reset(OpAMD64LEAQ2)
1916				v.AddArg2(y, x)
1917				return true
1918			}
1919		}
1920		break
1921	}
1922	// match: (ADDQ (ADDQconst [c] x) y)
1923	// result: (LEAQ1 [c] x y)
1924	for {
1925		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1926			if v_0.Op != OpAMD64ADDQconst {
1927				continue
1928			}
1929			c := auxIntToInt32(v_0.AuxInt)
1930			x := v_0.Args[0]
1931			y := v_1
1932			v.reset(OpAMD64LEAQ1)
1933			v.AuxInt = int32ToAuxInt(c)
1934			v.AddArg2(x, y)
1935			return true
1936		}
1937		break
1938	}
1939	// match: (ADDQ x (LEAQ [c] {s} y))
1940	// cond: x.Op != OpSB && y.Op != OpSB
1941	// result: (LEAQ1 [c] {s} x y)
1942	for {
1943		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1944			x := v_0
1945			if v_1.Op != OpAMD64LEAQ {
1946				continue
1947			}
1948			c := auxIntToInt32(v_1.AuxInt)
1949			s := auxToSym(v_1.Aux)
1950			y := v_1.Args[0]
1951			if !(x.Op != OpSB && y.Op != OpSB) {
1952				continue
1953			}
1954			v.reset(OpAMD64LEAQ1)
1955			v.AuxInt = int32ToAuxInt(c)
1956			v.Aux = symToAux(s)
1957			v.AddArg2(x, y)
1958			return true
1959		}
1960		break
1961	}
1962	// match: (ADDQ x (NEGQ y))
1963	// result: (SUBQ x y)
1964	for {
1965		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1966			x := v_0
1967			if v_1.Op != OpAMD64NEGQ {
1968				continue
1969			}
1970			y := v_1.Args[0]
1971			v.reset(OpAMD64SUBQ)
1972			v.AddArg2(x, y)
1973			return true
1974		}
1975		break
1976	}
1977	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
1978	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
1979	// result: (ADDQload x [off] {sym} ptr mem)
1980	for {
1981		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1982			x := v_0
1983			l := v_1
1984			if l.Op != OpAMD64MOVQload {
1985				continue
1986			}
1987			off := auxIntToInt32(l.AuxInt)
1988			sym := auxToSym(l.Aux)
1989			mem := l.Args[1]
1990			ptr := l.Args[0]
1991			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
1992				continue
1993			}
1994			v.reset(OpAMD64ADDQload)
1995			v.AuxInt = int32ToAuxInt(off)
1996			v.Aux = symToAux(sym)
1997			v.AddArg3(x, ptr, mem)
1998			return true
1999		}
2000		break
2001	}
2002	return false
2003}
2004func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
2005	v_1 := v.Args[1]
2006	v_0 := v.Args[0]
2007	// match: (ADDQcarry x (MOVQconst [c]))
2008	// cond: is32Bit(c)
2009	// result: (ADDQconstcarry x [int32(c)])
2010	for {
2011		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2012			x := v_0
2013			if v_1.Op != OpAMD64MOVQconst {
2014				continue
2015			}
2016			c := auxIntToInt64(v_1.AuxInt)
2017			if !(is32Bit(c)) {
2018				continue
2019			}
2020			v.reset(OpAMD64ADDQconstcarry)
2021			v.AuxInt = int32ToAuxInt(int32(c))
2022			v.AddArg(x)
2023			return true
2024		}
2025		break
2026	}
2027	return false
2028}
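// rewriteValueAMD64_OpAMD64ADDQconst folds a constant add into an address
// computation: over ADDQ or SHLQconst [1] it forms a LEAQ1, and over any
// LEAQ* it simply bumps that LEA's displacement, guarded by is32Bit checks
// so the combined offset still fits in the 32-bit aux field.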
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
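// rewriteValueAMD64_OpAMD64ADDQconstmodify absorbs address arithmetic into
// a read-modify-write op: an ADDQconst or LEAQ feeding the address folds
// into the ValAndOff offset (and merged symbol) when it can still represent
// the sum. The other *modify/*constmodify rewriters below follow the same
// two-rule pattern.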
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
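// rewriteValueAMD64_OpAMD64ADDQload folds constant offsets and LEAQ-formed
// addresses into the load's displacement, and forwards a just-stored value:
// reloading what a MOVSDstore wrote to the same [off] {sym} ptr becomes a
// register-to-register MOVQf2i, avoiding the memory round trip.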
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
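// The ADDSD/ADDSS rewriters below mirror the integer ops for SSE floats: a
// single-use MOVSDload/MOVSSload operand merges into the *load form, whose
// own rewrites fold addressing and forward stored integer bits back through
// MOVQi2f/MOVLi2f.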
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
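// rewriteValueAMD64_OpAMD64ANDL recognizes bit-manipulation idioms:
// x &^ (1<<y) becomes BTRL (reset bit y), and with GOAMD64 >= 3 the BMI
// forms kick in: x & ^y => ANDNL, x & -x => BLSIL (isolate lowest set
// bit), and x & (x-1) => BLSRL (clear lowest set bit), the last returning
// a tuple whose result is taken via Select0.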
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL <t> x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRL x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
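// rewriteValueAMD64_OpAMD64ANDLconst folds constant masks: nested ANDs
// combine, the masks 0xFF and 0xFFFF become the cheaper zero-extensions
// MOVBQZX and MOVWQZX, and masking with 0 or -1 collapses to a constant
// zero or to the operand itself.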
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
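// ANDN with a shifted-one mask, x &^ (1<<y), is a single bit-reset
// instruction; ANDNL and ANDNQ both canonicalize to BTRL/BTRQ here.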
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ <t> x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRQ x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
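// rewriteValueAMD64_OpAMD64BSFQ drops a zero-extension feeding a bit scan:
// the input was ORed with a sentinel bit just above the extended width
// (1<<8 or 1<<16), so BSFQ can never report a bit past the sentinel and
// the MOVBQZX/MOVWQZX cannot change the answer.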
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
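// rewriteValueAMD64_OpAMD64BSWAPL cancels double swaps and, when
// GOAMD64 >= 3, fuses a byte swap with its single-use load into MOVBELload
// (the MOVBE instruction loads and byte-swaps in one step); swapping a
// MOVBELload likewise cancels back to a plain MOVLload. BSWAPQ below is
// the 64-bit analogue.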
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPL (BSWAPL p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVLload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPQ (BSWAPQ p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
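// BTCQconst on a constant folds at compile time: complementing bit c of d
// is d ^ (1<<c).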
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
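// rewriteValueAMD64_OpAMD64BTLconst retargets bit tests through shifts:
// testing bit c of x>>d tests bit c+d of x (while in range), testing bit c
// of x<<d tests bit c-d, and testing bit 0 of a variable shift x>>y is the
// register-indexed form BTL/BTQ y x.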
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [0] s:(SHRXL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRXL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
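// BTRQconst and BTSQconst absorb an immediately preceding set, reset, or
// complement of the same bit (the last write to bit c wins) and fold away
// on constants via d &^ (1<<c) and d | (1<<c).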
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
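// The CMOVL* rewriters below share two themes: an InvertFlags operand
// swaps the condition for its operand-swapped counterpart (CC<->LS,
// CS<->HI, GE<->LE, GT<->LT; EQ maps to itself), and a known flag constant
// (FlagEQ, FlagGT_UGT, ...) decides the move statically, reducing the
// CMOV to one of its two data operands.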
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
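// rewriteValueAMD64_OpAMD64CMOVLEQ additionally reuses BLSR's flag output:
// TESTing Select0(BLSRQ/BLSRL) against itself only re-checks that result
// for zero, which the BLSR already recorded, so the TEST is replaced by
// Select1 of the same tuple.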
4012func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4013	v_2 := v.Args[2]
4014	v_1 := v.Args[1]
4015	v_0 := v.Args[0]
4016	b := v.Block
4017	// match: (CMOVLEQ x y (InvertFlags cond))
4018	// result: (CMOVLEQ x y cond)
4019	for {
4020		x := v_0
4021		y := v_1
4022		if v_2.Op != OpAMD64InvertFlags {
4023			break
4024		}
4025		cond := v_2.Args[0]
4026		v.reset(OpAMD64CMOVLEQ)
4027		v.AddArg3(x, y, cond)
4028		return true
4029	}
4030	// match: (CMOVLEQ _ x (FlagEQ))
4031	// result: x
4032	for {
4033		x := v_1
4034		if v_2.Op != OpAMD64FlagEQ {
4035			break
4036		}
4037		v.copyOf(x)
4038		return true
4039	}
4040	// match: (CMOVLEQ y _ (FlagGT_UGT))
4041	// result: y
4042	for {
4043		y := v_0
4044		if v_2.Op != OpAMD64FlagGT_UGT {
4045			break
4046		}
4047		v.copyOf(y)
4048		return true
4049	}
4050	// match: (CMOVLEQ y _ (FlagGT_ULT))
4051	// result: y
4052	for {
4053		y := v_0
4054		if v_2.Op != OpAMD64FlagGT_ULT {
4055			break
4056		}
4057		v.copyOf(y)
4058		return true
4059	}
4060	// match: (CMOVLEQ y _ (FlagLT_ULT))
4061	// result: y
4062	for {
4063		y := v_0
4064		if v_2.Op != OpAMD64FlagLT_ULT {
4065			break
4066		}
4067		v.copyOf(y)
4068		return true
4069	}
4070	// match: (CMOVLEQ y _ (FlagLT_UGT))
4071	// result: y
4072	for {
4073		y := v_0
4074		if v_2.Op != OpAMD64FlagLT_UGT {
4075			break
4076		}
4077		v.copyOf(y)
4078		return true
4079	}
	// match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
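	// BSFQ and BSRQ report a zero source through their flag result
	// (Select1). A source of the form (ORQconst [c] _) with c != 0 can
	// never be zero, so the equality condition is statically false and
	// the CMOVQEQ keeps its first operand.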
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSRQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQNE x y (InvertFlags cond))
	// result: (CMOVQNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWHI x y (InvertFlags cond))
	// result: (CMOVWCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLE x y (InvertFlags cond))
	// result: (CMOVWGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLS x y (InvertFlags cond))
	// result: (CMOVWCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLT x y (InvertFlags cond))
	// result: (CMOVWGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWNE x y (InvertFlags cond))
	// result: (CMOVWNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int8(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
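	// CMPBconst compares its argument against the constant, so a constant
	// on the left is handled by comparing the other way around and
	// wrapping the result in InvertFlags, which downstream rules
	// interpret as the swapped comparison.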
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int8(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
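	// Putting the operands of a compare into a canonical order (via
	// canonLessThan) lets CSE merge (CMPB x y) with (CMPB y x), at the
	// cost of an InvertFlags that later rewrites absorb.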
	// match: (CMPB x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPB y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
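	// A byte load whose only consumer is this compare folds into CMPBload
	// (or, with the operands swapped, into InvertFlags of one).
	// canMergeLoad checks that the load can legally move to the compare,
	// and clobber invalidates the now-dead load.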
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
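	// ANDing with a mask whose low byte is int8(m) >= 0 leaves a byte
	// value in [0, int8(m)], so int8(m) < n makes the compare a known
	// unsigned and signed less-than.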
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int8(m) && int8(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTB x y)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPBconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTBconst [int8(c)] x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
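	// A once-used byte load compared against a constant becomes a
	// CMPBconstload. The replacement is built in the load's block
	// (@l.Block) rather than the compare's, presumably so that the load's
	// memory argument is in scope where the new value is placed.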
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt8(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
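// The usual address folding for compare-with-memory: constant offsets from
// ADDQconst, and offset+symbol pairs from LEAQ, are absorbed into the
// instruction's ValAndOff aux, guarded by the canAdd32 overflow check and by
// canMergeSym.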
func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
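	// A constant second operand folds the rest of the way: the
	// compare-with-memory becomes a compare-against-immediate load, with
	// the constant truncated to its low 8 bits to match the byte width.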
	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
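// The CMPL rules mirror CMPB at 32-bit width: fold a constant operand into
// CMPLconst (inverted when the constant is on the left), canonicalize the
// operand order, and merge a single-use MOVLload into CMPLload.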
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPL y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
6773	// match: (CMPLconst (SHRLconst _ [c]) [n])
6774	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
6775	// result: (FlagLT_ULT)
6776	for {
6777		n := auxIntToInt32(v.AuxInt)
6778		if v_0.Op != OpAMD64SHRLconst {
6779			break
6780		}
6781		c := auxIntToInt8(v_0.AuxInt)
6782		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
6783			break
6784		}
6785		v.reset(OpAMD64FlagLT_ULT)
6786		return true
6787	}
6788	// match: (CMPLconst (ANDLconst _ [m]) [n])
6789	// cond: 0 <= m && m < n
6790	// result: (FlagLT_ULT)
6791	for {
6792		n := auxIntToInt32(v.AuxInt)
6793		if v_0.Op != OpAMD64ANDLconst {
6794			break
6795		}
6796		m := auxIntToInt32(v_0.AuxInt)
6797		if !(0 <= m && m < n) {
6798			break
6799		}
6800		v.reset(OpAMD64FlagLT_ULT)
6801		return true
6802	}
6803	// match: (CMPLconst a:(ANDL x y) [0])
6804	// cond: a.Uses == 1
6805	// result: (TESTL x y)
6806	for {
6807		if auxIntToInt32(v.AuxInt) != 0 {
6808			break
6809		}
6810		a := v_0
6811		if a.Op != OpAMD64ANDL {
6812			break
6813		}
6814		y := a.Args[1]
6815		x := a.Args[0]
6816		if !(a.Uses == 1) {
6817			break
6818		}
6819		v.reset(OpAMD64TESTL)
6820		v.AddArg2(x, y)
6821		return true
6822	}
6823	// match: (CMPLconst a:(ANDLconst [c] x) [0])
6824	// cond: a.Uses == 1
6825	// result: (TESTLconst [c] x)
6826	for {
6827		if auxIntToInt32(v.AuxInt) != 0 {
6828			break
6829		}
6830		a := v_0
6831		if a.Op != OpAMD64ANDLconst {
6832			break
6833		}
6834		c := auxIntToInt32(a.AuxInt)
6835		x := a.Args[0]
6836		if !(a.Uses == 1) {
6837			break
6838		}
6839		v.reset(OpAMD64TESTLconst)
6840		v.AuxInt = int32ToAuxInt(c)
6841		v.AddArg(x)
6842		return true
6843	}
6844	// match: (CMPLconst x [0])
6845	// result: (TESTL x x)
6846	for {
6847		if auxIntToInt32(v.AuxInt) != 0 {
6848			break
6849		}
6850		x := v_0
6851		v.reset(OpAMD64TESTL)
6852		v.AddArg2(x, x)
6853		return true
6854	}
6855	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
6856	// cond: l.Uses == 1 && clobber(l)
6857	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
6858	for {
6859		c := auxIntToInt32(v.AuxInt)
6860		l := v_0
6861		if l.Op != OpAMD64MOVLload {
6862			break
6863		}
6864		off := auxIntToInt32(l.AuxInt)
6865		sym := auxToSym(l.Aux)
6866		mem := l.Args[1]
6867		ptr := l.Args[0]
6868		if !(l.Uses == 1 && clobber(l)) {
6869			break
6870		}
6871		b = l.Block
6872		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
6873		v.copyOf(v0)
6874		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6875		v0.Aux = symToAux(sym)
6876		v0.AddArg2(ptr, mem)
6877		return true
6878	}
6879	return false
6880}
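// rewriteValueAMD64_OpAMD64CMPLconstload folds an ADDQconst or LEAQ address
// computation into the offset (and merged symbol) of a CMPLconstload while
// the combined offset still fits in 32 bits; the CMPQconstload and
// CMPWconstload functions below apply the same two rewrites.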
6881func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
6882	v_1 := v.Args[1]
6883	v_0 := v.Args[0]
6884	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
6885	// cond: ValAndOff(valoff1).canAdd32(off2)
6886	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
6887	for {
6888		valoff1 := auxIntToValAndOff(v.AuxInt)
6889		sym := auxToSym(v.Aux)
6890		if v_0.Op != OpAMD64ADDQconst {
6891			break
6892		}
6893		off2 := auxIntToInt32(v_0.AuxInt)
6894		base := v_0.Args[0]
6895		mem := v_1
6896		if !(ValAndOff(valoff1).canAdd32(off2)) {
6897			break
6898		}
6899		v.reset(OpAMD64CMPLconstload)
6900		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6901		v.Aux = symToAux(sym)
6902		v.AddArg2(base, mem)
6903		return true
6904	}
6905	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
6906	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
6907	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
6908	for {
6909		valoff1 := auxIntToValAndOff(v.AuxInt)
6910		sym1 := auxToSym(v.Aux)
6911		if v_0.Op != OpAMD64LEAQ {
6912			break
6913		}
6914		off2 := auxIntToInt32(v_0.AuxInt)
6915		sym2 := auxToSym(v_0.Aux)
6916		base := v_0.Args[0]
6917		mem := v_1
6918		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6919			break
6920		}
6921		v.reset(OpAMD64CMPLconstload)
6922		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6923		v.Aux = symToAux(mergeSym(sym1, sym2))
6924		v.AddArg2(base, mem)
6925		return true
6926	}
6927	return false
6928}
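// rewriteValueAMD64_OpAMD64CMPLload folds ADDQconst and LEAQ address
// computations into the offset and symbol of a CMPLload, and converts a
// MOVLconst compare operand into a CMPLconstload.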
6929func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
6930	v_2 := v.Args[2]
6931	v_1 := v.Args[1]
6932	v_0 := v.Args[0]
6933	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
6934	// cond: is32Bit(int64(off1)+int64(off2))
6935	// result: (CMPLload [off1+off2] {sym} base val mem)
6936	for {
6937		off1 := auxIntToInt32(v.AuxInt)
6938		sym := auxToSym(v.Aux)
6939		if v_0.Op != OpAMD64ADDQconst {
6940			break
6941		}
6942		off2 := auxIntToInt32(v_0.AuxInt)
6943		base := v_0.Args[0]
6944		val := v_1
6945		mem := v_2
6946		if !(is32Bit(int64(off1) + int64(off2))) {
6947			break
6948		}
6949		v.reset(OpAMD64CMPLload)
6950		v.AuxInt = int32ToAuxInt(off1 + off2)
6951		v.Aux = symToAux(sym)
6952		v.AddArg3(base, val, mem)
6953		return true
6954	}
6955	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
6956	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
6957	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
6958	for {
6959		off1 := auxIntToInt32(v.AuxInt)
6960		sym1 := auxToSym(v.Aux)
6961		if v_0.Op != OpAMD64LEAQ {
6962			break
6963		}
6964		off2 := auxIntToInt32(v_0.AuxInt)
6965		sym2 := auxToSym(v_0.Aux)
6966		base := v_0.Args[0]
6967		val := v_1
6968		mem := v_2
6969		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6970			break
6971		}
6972		v.reset(OpAMD64CMPLload)
6973		v.AuxInt = int32ToAuxInt(off1 + off2)
6974		v.Aux = symToAux(mergeSym(sym1, sym2))
6975		v.AddArg3(base, val, mem)
6976		return true
6977	}
6978	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
6979	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
6980	for {
6981		off := auxIntToInt32(v.AuxInt)
6982		sym := auxToSym(v.Aux)
6983		ptr := v_0
6984		if v_1.Op != OpAMD64MOVLconst {
6985			break
6986		}
6987		c := auxIntToInt32(v_1.AuxInt)
6988		mem := v_2
6989		v.reset(OpAMD64CMPLconstload)
6990		v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6991		v.Aux = symToAux(sym)
6992		v.AddArg2(ptr, mem)
6993		return true
6994	}
6995	return false
6996}
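// rewriteValueAMD64_OpAMD64CMPQ rewrites CMPQ like CMPL, with 64-bit
// caveats: a MOVQconst operand folds into CMPQconst only if it passes
// is32Bit, and comparisons of two MOVQconsts evaluate directly to flag
// values. Mergeable MOVQloads are absorbed into CMPQload.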
6997func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
6998	v_1 := v.Args[1]
6999	v_0 := v.Args[0]
7000	b := v.Block
7001	// match: (CMPQ x (MOVQconst [c]))
7002	// cond: is32Bit(c)
7003	// result: (CMPQconst x [int32(c)])
7004	for {
7005		x := v_0
7006		if v_1.Op != OpAMD64MOVQconst {
7007			break
7008		}
7009		c := auxIntToInt64(v_1.AuxInt)
7010		if !(is32Bit(c)) {
7011			break
7012		}
7013		v.reset(OpAMD64CMPQconst)
7014		v.AuxInt = int32ToAuxInt(int32(c))
7015		v.AddArg(x)
7016		return true
7017	}
7018	// match: (CMPQ (MOVQconst [c]) x)
7019	// cond: is32Bit(c)
7020	// result: (InvertFlags (CMPQconst x [int32(c)]))
7021	for {
7022		if v_0.Op != OpAMD64MOVQconst {
7023			break
7024		}
7025		c := auxIntToInt64(v_0.AuxInt)
7026		x := v_1
7027		if !(is32Bit(c)) {
7028			break
7029		}
7030		v.reset(OpAMD64InvertFlags)
7031		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7032		v0.AuxInt = int32ToAuxInt(int32(c))
7033		v0.AddArg(x)
7034		v.AddArg(v0)
7035		return true
7036	}
7037	// match: (CMPQ x y)
7038	// cond: canonLessThan(x,y)
7039	// result: (InvertFlags (CMPQ y x))
7040	for {
7041		x := v_0
7042		y := v_1
7043		if !(canonLessThan(x, y)) {
7044			break
7045		}
7046		v.reset(OpAMD64InvertFlags)
7047		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7048		v0.AddArg2(y, x)
7049		v.AddArg(v0)
7050		return true
7051	}
7052	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7053	// cond: x==y
7054	// result: (FlagEQ)
7055	for {
7056		if v_0.Op != OpAMD64MOVQconst {
7057			break
7058		}
7059		x := auxIntToInt64(v_0.AuxInt)
7060		if v_1.Op != OpAMD64MOVQconst {
7061			break
7062		}
7063		y := auxIntToInt64(v_1.AuxInt)
7064		if !(x == y) {
7065			break
7066		}
7067		v.reset(OpAMD64FlagEQ)
7068		return true
7069	}
7070	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7071	// cond: x<y && uint64(x)<uint64(y)
7072	// result: (FlagLT_ULT)
7073	for {
7074		if v_0.Op != OpAMD64MOVQconst {
7075			break
7076		}
7077		x := auxIntToInt64(v_0.AuxInt)
7078		if v_1.Op != OpAMD64MOVQconst {
7079			break
7080		}
7081		y := auxIntToInt64(v_1.AuxInt)
7082		if !(x < y && uint64(x) < uint64(y)) {
7083			break
7084		}
7085		v.reset(OpAMD64FlagLT_ULT)
7086		return true
7087	}
7088	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7089	// cond: x<y && uint64(x)>uint64(y)
7090	// result: (FlagLT_UGT)
7091	for {
7092		if v_0.Op != OpAMD64MOVQconst {
7093			break
7094		}
7095		x := auxIntToInt64(v_0.AuxInt)
7096		if v_1.Op != OpAMD64MOVQconst {
7097			break
7098		}
7099		y := auxIntToInt64(v_1.AuxInt)
7100		if !(x < y && uint64(x) > uint64(y)) {
7101			break
7102		}
7103		v.reset(OpAMD64FlagLT_UGT)
7104		return true
7105	}
7106	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7107	// cond: x>y && uint64(x)<uint64(y)
7108	// result: (FlagGT_ULT)
7109	for {
7110		if v_0.Op != OpAMD64MOVQconst {
7111			break
7112		}
7113		x := auxIntToInt64(v_0.AuxInt)
7114		if v_1.Op != OpAMD64MOVQconst {
7115			break
7116		}
7117		y := auxIntToInt64(v_1.AuxInt)
7118		if !(x > y && uint64(x) < uint64(y)) {
7119			break
7120		}
7121		v.reset(OpAMD64FlagGT_ULT)
7122		return true
7123	}
7124	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7125	// cond: x>y && uint64(x)>uint64(y)
7126	// result: (FlagGT_UGT)
7127	for {
7128		if v_0.Op != OpAMD64MOVQconst {
7129			break
7130		}
7131		x := auxIntToInt64(v_0.AuxInt)
7132		if v_1.Op != OpAMD64MOVQconst {
7133			break
7134		}
7135		y := auxIntToInt64(v_1.AuxInt)
7136		if !(x > y && uint64(x) > uint64(y)) {
7137			break
7138		}
7139		v.reset(OpAMD64FlagGT_UGT)
7140		return true
7141	}
7142	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
7143	// cond: canMergeLoad(v, l) && clobber(l)
7144	// result: (CMPQload {sym} [off] ptr x mem)
7145	for {
7146		l := v_0
7147		if l.Op != OpAMD64MOVQload {
7148			break
7149		}
7150		off := auxIntToInt32(l.AuxInt)
7151		sym := auxToSym(l.Aux)
7152		mem := l.Args[1]
7153		ptr := l.Args[0]
7154		x := v_1
7155		if !(canMergeLoad(v, l) && clobber(l)) {
7156			break
7157		}
7158		v.reset(OpAMD64CMPQload)
7159		v.AuxInt = int32ToAuxInt(off)
7160		v.Aux = symToAux(sym)
7161		v.AddArg3(ptr, x, mem)
7162		return true
7163	}
7164	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
7165	// cond: canMergeLoad(v, l) && clobber(l)
7166	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
7167	for {
7168		x := v_0
7169		l := v_1
7170		if l.Op != OpAMD64MOVQload {
7171			break
7172		}
7173		off := auxIntToInt32(l.AuxInt)
7174		sym := auxToSym(l.Aux)
7175		mem := l.Args[1]
7176		ptr := l.Args[0]
7177		if !(canMergeLoad(v, l) && clobber(l)) {
7178			break
7179		}
7180		v.reset(OpAMD64InvertFlags)
7181		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7182		v0.AuxInt = int32ToAuxInt(off)
7183		v0.Aux = symToAux(sym)
7184		v0.AddArg3(ptr, x, mem)
7185		v.AddArg(v0)
7186		return true
7187	}
7188	return false
7189}
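// rewriteValueAMD64_OpAMD64CMPQconst constant-folds CMPQconst of a MOVQconst
// to a flags value, treats zero-extended MOVBQZX/MOVWQZX operands as
// unsigned-below any constant larger than their maximum, bounds shifted or
// masked operands, lowers comparisons against zero to TESTQ forms, and
// moves a single-use MOVQload into a CMPQconstload in the load's block.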
7190func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7191	v_0 := v.Args[0]
7192	b := v.Block
7193	// match: (CMPQconst (MOVQconst [x]) [y])
7194	// cond: x==int64(y)
7195	// result: (FlagEQ)
7196	for {
7197		y := auxIntToInt32(v.AuxInt)
7198		if v_0.Op != OpAMD64MOVQconst {
7199			break
7200		}
7201		x := auxIntToInt64(v_0.AuxInt)
7202		if !(x == int64(y)) {
7203			break
7204		}
7205		v.reset(OpAMD64FlagEQ)
7206		return true
7207	}
7208	// match: (CMPQconst (MOVQconst [x]) [y])
7209	// cond: x<int64(y) && uint64(x)<uint64(int64(y))
7210	// result: (FlagLT_ULT)
7211	for {
7212		y := auxIntToInt32(v.AuxInt)
7213		if v_0.Op != OpAMD64MOVQconst {
7214			break
7215		}
7216		x := auxIntToInt64(v_0.AuxInt)
7217		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7218			break
7219		}
7220		v.reset(OpAMD64FlagLT_ULT)
7221		return true
7222	}
7223	// match: (CMPQconst (MOVQconst [x]) [y])
7224	// cond: x<int64(y) && uint64(x)>uint64(int64(y))
7225	// result: (FlagLT_UGT)
7226	for {
7227		y := auxIntToInt32(v.AuxInt)
7228		if v_0.Op != OpAMD64MOVQconst {
7229			break
7230		}
7231		x := auxIntToInt64(v_0.AuxInt)
7232		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7233			break
7234		}
7235		v.reset(OpAMD64FlagLT_UGT)
7236		return true
7237	}
7238	// match: (CMPQconst (MOVQconst [x]) [y])
7239	// cond: x>int64(y) && uint64(x)<uint64(int64(y))
7240	// result: (FlagGT_ULT)
7241	for {
7242		y := auxIntToInt32(v.AuxInt)
7243		if v_0.Op != OpAMD64MOVQconst {
7244			break
7245		}
7246		x := auxIntToInt64(v_0.AuxInt)
7247		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7248			break
7249		}
7250		v.reset(OpAMD64FlagGT_ULT)
7251		return true
7252	}
7253	// match: (CMPQconst (MOVQconst [x]) [y])
7254	// cond: x>int64(y) && uint64(x)>uint64(int64(y))
7255	// result: (FlagGT_UGT)
7256	for {
7257		y := auxIntToInt32(v.AuxInt)
7258		if v_0.Op != OpAMD64MOVQconst {
7259			break
7260		}
7261		x := auxIntToInt64(v_0.AuxInt)
7262		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7263			break
7264		}
7265		v.reset(OpAMD64FlagGT_UGT)
7266		return true
7267	}
7268	// match: (CMPQconst (MOVBQZX _) [c])
7269	// cond: 0xFF < c
7270	// result: (FlagLT_ULT)
7271	for {
7272		c := auxIntToInt32(v.AuxInt)
7273		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7274			break
7275		}
7276		v.reset(OpAMD64FlagLT_ULT)
7277		return true
7278	}
7279	// match: (CMPQconst (MOVWQZX _) [c])
7280	// cond: 0xFFFF < c
7281	// result: (FlagLT_ULT)
7282	for {
7283		c := auxIntToInt32(v.AuxInt)
7284		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7285			break
7286		}
7287		v.reset(OpAMD64FlagLT_ULT)
7288		return true
7289	}
7290	// match: (CMPQconst (SHRQconst _ [c]) [n])
7291	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
7292	// result: (FlagLT_ULT)
7293	for {
7294		n := auxIntToInt32(v.AuxInt)
7295		if v_0.Op != OpAMD64SHRQconst {
7296			break
7297		}
7298		c := auxIntToInt8(v_0.AuxInt)
7299		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7300			break
7301		}
7302		v.reset(OpAMD64FlagLT_ULT)
7303		return true
7304	}
7305	// match: (CMPQconst (ANDQconst _ [m]) [n])
7306	// cond: 0 <= m && m < n
7307	// result: (FlagLT_ULT)
7308	for {
7309		n := auxIntToInt32(v.AuxInt)
7310		if v_0.Op != OpAMD64ANDQconst {
7311			break
7312		}
7313		m := auxIntToInt32(v_0.AuxInt)
7314		if !(0 <= m && m < n) {
7315			break
7316		}
7317		v.reset(OpAMD64FlagLT_ULT)
7318		return true
7319	}
7320	// match: (CMPQconst (ANDLconst _ [m]) [n])
7321	// cond: 0 <= m && m < n
7322	// result: (FlagLT_ULT)
7323	for {
7324		n := auxIntToInt32(v.AuxInt)
7325		if v_0.Op != OpAMD64ANDLconst {
7326			break
7327		}
7328		m := auxIntToInt32(v_0.AuxInt)
7329		if !(0 <= m && m < n) {
7330			break
7331		}
7332		v.reset(OpAMD64FlagLT_ULT)
7333		return true
7334	}
7335	// match: (CMPQconst a:(ANDQ x y) [0])
7336	// cond: a.Uses == 1
7337	// result: (TESTQ x y)
7338	for {
7339		if auxIntToInt32(v.AuxInt) != 0 {
7340			break
7341		}
7342		a := v_0
7343		if a.Op != OpAMD64ANDQ {
7344			break
7345		}
7346		y := a.Args[1]
7347		x := a.Args[0]
7348		if !(a.Uses == 1) {
7349			break
7350		}
7351		v.reset(OpAMD64TESTQ)
7352		v.AddArg2(x, y)
7353		return true
7354	}
7355	// match: (CMPQconst a:(ANDQconst [c] x) [0])
7356	// cond: a.Uses == 1
7357	// result: (TESTQconst [c] x)
7358	for {
7359		if auxIntToInt32(v.AuxInt) != 0 {
7360			break
7361		}
7362		a := v_0
7363		if a.Op != OpAMD64ANDQconst {
7364			break
7365		}
7366		c := auxIntToInt32(a.AuxInt)
7367		x := a.Args[0]
7368		if !(a.Uses == 1) {
7369			break
7370		}
7371		v.reset(OpAMD64TESTQconst)
7372		v.AuxInt = int32ToAuxInt(c)
7373		v.AddArg(x)
7374		return true
7375	}
7376	// match: (CMPQconst x [0])
7377	// result: (TESTQ x x)
7378	for {
7379		if auxIntToInt32(v.AuxInt) != 0 {
7380			break
7381		}
7382		x := v_0
7383		v.reset(OpAMD64TESTQ)
7384		v.AddArg2(x, x)
7385		return true
7386	}
7387	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
7388	// cond: l.Uses == 1 && clobber(l)
7389	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
7390	for {
7391		c := auxIntToInt32(v.AuxInt)
7392		l := v_0
7393		if l.Op != OpAMD64MOVQload {
7394			break
7395		}
7396		off := auxIntToInt32(l.AuxInt)
7397		sym := auxToSym(l.Aux)
7398		mem := l.Args[1]
7399		ptr := l.Args[0]
7400		if !(l.Uses == 1 && clobber(l)) {
7401			break
7402		}
7403		b = l.Block
7404		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7405		v.copyOf(v0)
7406		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7407		v0.Aux = symToAux(sym)
7408		v0.AddArg2(ptr, mem)
7409		return true
7410	}
7411	return false
7412}
7413func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7414	v_1 := v.Args[1]
7415	v_0 := v.Args[0]
7416	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7417	// cond: ValAndOff(valoff1).canAdd32(off2)
7418	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7419	for {
7420		valoff1 := auxIntToValAndOff(v.AuxInt)
7421		sym := auxToSym(v.Aux)
7422		if v_0.Op != OpAMD64ADDQconst {
7423			break
7424		}
7425		off2 := auxIntToInt32(v_0.AuxInt)
7426		base := v_0.Args[0]
7427		mem := v_1
7428		if !(ValAndOff(valoff1).canAdd32(off2)) {
7429			break
7430		}
7431		v.reset(OpAMD64CMPQconstload)
7432		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7433		v.Aux = symToAux(sym)
7434		v.AddArg2(base, mem)
7435		return true
7436	}
7437	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7438	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7439	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7440	for {
7441		valoff1 := auxIntToValAndOff(v.AuxInt)
7442		sym1 := auxToSym(v.Aux)
7443		if v_0.Op != OpAMD64LEAQ {
7444			break
7445		}
7446		off2 := auxIntToInt32(v_0.AuxInt)
7447		sym2 := auxToSym(v_0.Aux)
7448		base := v_0.Args[0]
7449		mem := v_1
7450		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7451			break
7452		}
7453		v.reset(OpAMD64CMPQconstload)
7454		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7455		v.Aux = symToAux(mergeSym(sym1, sym2))
7456		v.AddArg2(base, mem)
7457		return true
7458	}
7459	return false
7460}
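// rewriteValueAMD64_OpAMD64CMPQload folds ADDQconst and LEAQ address
// computations into the offset and symbol of a CMPQload, and converts a
// MOVQconst compare operand into a CMPQconstload when validVal(c) holds.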
7461func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7462	v_2 := v.Args[2]
7463	v_1 := v.Args[1]
7464	v_0 := v.Args[0]
7465	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
7466	// cond: is32Bit(int64(off1)+int64(off2))
7467	// result: (CMPQload [off1+off2] {sym} base val mem)
7468	for {
7469		off1 := auxIntToInt32(v.AuxInt)
7470		sym := auxToSym(v.Aux)
7471		if v_0.Op != OpAMD64ADDQconst {
7472			break
7473		}
7474		off2 := auxIntToInt32(v_0.AuxInt)
7475		base := v_0.Args[0]
7476		val := v_1
7477		mem := v_2
7478		if !(is32Bit(int64(off1) + int64(off2))) {
7479			break
7480		}
7481		v.reset(OpAMD64CMPQload)
7482		v.AuxInt = int32ToAuxInt(off1 + off2)
7483		v.Aux = symToAux(sym)
7484		v.AddArg3(base, val, mem)
7485		return true
7486	}
7487	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7488	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7489	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7490	for {
7491		off1 := auxIntToInt32(v.AuxInt)
7492		sym1 := auxToSym(v.Aux)
7493		if v_0.Op != OpAMD64LEAQ {
7494			break
7495		}
7496		off2 := auxIntToInt32(v_0.AuxInt)
7497		sym2 := auxToSym(v_0.Aux)
7498		base := v_0.Args[0]
7499		val := v_1
7500		mem := v_2
7501		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7502			break
7503		}
7504		v.reset(OpAMD64CMPQload)
7505		v.AuxInt = int32ToAuxInt(off1 + off2)
7506		v.Aux = symToAux(mergeSym(sym1, sym2))
7507		v.AddArg3(base, val, mem)
7508		return true
7509	}
7510	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
7511	// cond: validVal(c)
7512	// result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7513	for {
7514		off := auxIntToInt32(v.AuxInt)
7515		sym := auxToSym(v.Aux)
7516		ptr := v_0
7517		if v_1.Op != OpAMD64MOVQconst {
7518			break
7519		}
7520		c := auxIntToInt64(v_1.AuxInt)
7521		mem := v_2
7522		if !(validVal(c)) {
7523			break
7524		}
7525		v.reset(OpAMD64CMPQconstload)
7526		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7527		v.Aux = symToAux(sym)
7528		v.AddArg2(ptr, mem)
7529		return true
7530	}
7531	return false
7532}
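// rewriteValueAMD64_OpAMD64CMPW rewrites CMPW like CMPL, truncating a
// MOVLconst operand to int16 for CMPWconst and absorbing mergeable
// MOVWloads into CMPWload.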
7533func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
7534	v_1 := v.Args[1]
7535	v_0 := v.Args[0]
7536	b := v.Block
7537	// match: (CMPW x (MOVLconst [c]))
7538	// result: (CMPWconst x [int16(c)])
7539	for {
7540		x := v_0
7541		if v_1.Op != OpAMD64MOVLconst {
7542			break
7543		}
7544		c := auxIntToInt32(v_1.AuxInt)
7545		v.reset(OpAMD64CMPWconst)
7546		v.AuxInt = int16ToAuxInt(int16(c))
7547		v.AddArg(x)
7548		return true
7549	}
7550	// match: (CMPW (MOVLconst [c]) x)
7551	// result: (InvertFlags (CMPWconst x [int16(c)]))
7552	for {
7553		if v_0.Op != OpAMD64MOVLconst {
7554			break
7555		}
7556		c := auxIntToInt32(v_0.AuxInt)
7557		x := v_1
7558		v.reset(OpAMD64InvertFlags)
7559		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
7560		v0.AuxInt = int16ToAuxInt(int16(c))
7561		v0.AddArg(x)
7562		v.AddArg(v0)
7563		return true
7564	}
7565	// match: (CMPW x y)
7566	// cond: canonLessThan(x,y)
7567	// result: (InvertFlags (CMPW y x))
7568	for {
7569		x := v_0
7570		y := v_1
7571		if !(canonLessThan(x, y)) {
7572			break
7573		}
7574		v.reset(OpAMD64InvertFlags)
7575		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
7576		v0.AddArg2(y, x)
7577		v.AddArg(v0)
7578		return true
7579	}
7580	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
7581	// cond: canMergeLoad(v, l) && clobber(l)
7582	// result: (CMPWload {sym} [off] ptr x mem)
7583	for {
7584		l := v_0
7585		if l.Op != OpAMD64MOVWload {
7586			break
7587		}
7588		off := auxIntToInt32(l.AuxInt)
7589		sym := auxToSym(l.Aux)
7590		mem := l.Args[1]
7591		ptr := l.Args[0]
7592		x := v_1
7593		if !(canMergeLoad(v, l) && clobber(l)) {
7594			break
7595		}
7596		v.reset(OpAMD64CMPWload)
7597		v.AuxInt = int32ToAuxInt(off)
7598		v.Aux = symToAux(sym)
7599		v.AddArg3(ptr, x, mem)
7600		return true
7601	}
7602	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
7603	// cond: canMergeLoad(v, l) && clobber(l)
7604	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
7605	for {
7606		x := v_0
7607		l := v_1
7608		if l.Op != OpAMD64MOVWload {
7609			break
7610		}
7611		off := auxIntToInt32(l.AuxInt)
7612		sym := auxToSym(l.Aux)
7613		mem := l.Args[1]
7614		ptr := l.Args[0]
7615		if !(canMergeLoad(v, l) && clobber(l)) {
7616			break
7617		}
7618		v.reset(OpAMD64InvertFlags)
7619		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
7620		v0.AuxInt = int32ToAuxInt(off)
7621		v0.Aux = symToAux(sym)
7622		v0.AddArg3(ptr, x, mem)
7623		v.AddArg(v0)
7624		return true
7625	}
7626	return false
7627}
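// rewriteValueAMD64_OpAMD64CMPWconst is the 16-bit analogue of the
// CMPLconst rules: constant comparisons evaluate to flag values, a masked
// operand bounded below the constant gives FlagLT_ULT, comparisons against
// zero become TESTW forms, and a single-use MOVWload moves into a
// CMPWconstload in the load's block.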
7628func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
7629	v_0 := v.Args[0]
7630	b := v.Block
7631	// match: (CMPWconst (MOVLconst [x]) [y])
7632	// cond: int16(x)==y
7633	// result: (FlagEQ)
7634	for {
7635		y := auxIntToInt16(v.AuxInt)
7636		if v_0.Op != OpAMD64MOVLconst {
7637			break
7638		}
7639		x := auxIntToInt32(v_0.AuxInt)
7640		if !(int16(x) == y) {
7641			break
7642		}
7643		v.reset(OpAMD64FlagEQ)
7644		return true
7645	}
7646	// match: (CMPWconst (MOVLconst [x]) [y])
7647	// cond: int16(x)<y && uint16(x)<uint16(y)
7648	// result: (FlagLT_ULT)
7649	for {
7650		y := auxIntToInt16(v.AuxInt)
7651		if v_0.Op != OpAMD64MOVLconst {
7652			break
7653		}
7654		x := auxIntToInt32(v_0.AuxInt)
7655		if !(int16(x) < y && uint16(x) < uint16(y)) {
7656			break
7657		}
7658		v.reset(OpAMD64FlagLT_ULT)
7659		return true
7660	}
7661	// match: (CMPWconst (MOVLconst [x]) [y])
7662	// cond: int16(x)<y && uint16(x)>uint16(y)
7663	// result: (FlagLT_UGT)
7664	for {
7665		y := auxIntToInt16(v.AuxInt)
7666		if v_0.Op != OpAMD64MOVLconst {
7667			break
7668		}
7669		x := auxIntToInt32(v_0.AuxInt)
7670		if !(int16(x) < y && uint16(x) > uint16(y)) {
7671			break
7672		}
7673		v.reset(OpAMD64FlagLT_UGT)
7674		return true
7675	}
7676	// match: (CMPWconst (MOVLconst [x]) [y])
7677	// cond: int16(x)>y && uint16(x)<uint16(y)
7678	// result: (FlagGT_ULT)
7679	for {
7680		y := auxIntToInt16(v.AuxInt)
7681		if v_0.Op != OpAMD64MOVLconst {
7682			break
7683		}
7684		x := auxIntToInt32(v_0.AuxInt)
7685		if !(int16(x) > y && uint16(x) < uint16(y)) {
7686			break
7687		}
7688		v.reset(OpAMD64FlagGT_ULT)
7689		return true
7690	}
7691	// match: (CMPWconst (MOVLconst [x]) [y])
7692	// cond: int16(x)>y && uint16(x)>uint16(y)
7693	// result: (FlagGT_UGT)
7694	for {
7695		y := auxIntToInt16(v.AuxInt)
7696		if v_0.Op != OpAMD64MOVLconst {
7697			break
7698		}
7699		x := auxIntToInt32(v_0.AuxInt)
7700		if !(int16(x) > y && uint16(x) > uint16(y)) {
7701			break
7702		}
7703		v.reset(OpAMD64FlagGT_UGT)
7704		return true
7705	}
7706	// match: (CMPWconst (ANDLconst _ [m]) [n])
7707	// cond: 0 <= int16(m) && int16(m) < n
7708	// result: (FlagLT_ULT)
7709	for {
7710		n := auxIntToInt16(v.AuxInt)
7711		if v_0.Op != OpAMD64ANDLconst {
7712			break
7713		}
7714		m := auxIntToInt32(v_0.AuxInt)
7715		if !(0 <= int16(m) && int16(m) < n) {
7716			break
7717		}
7718		v.reset(OpAMD64FlagLT_ULT)
7719		return true
7720	}
7721	// match: (CMPWconst a:(ANDL x y) [0])
7722	// cond: a.Uses == 1
7723	// result: (TESTW x y)
7724	for {
7725		if auxIntToInt16(v.AuxInt) != 0 {
7726			break
7727		}
7728		a := v_0
7729		if a.Op != OpAMD64ANDL {
7730			break
7731		}
7732		y := a.Args[1]
7733		x := a.Args[0]
7734		if !(a.Uses == 1) {
7735			break
7736		}
7737		v.reset(OpAMD64TESTW)
7738		v.AddArg2(x, y)
7739		return true
7740	}
7741	// match: (CMPWconst a:(ANDLconst [c] x) [0])
7742	// cond: a.Uses == 1
7743	// result: (TESTWconst [int16(c)] x)
7744	for {
7745		if auxIntToInt16(v.AuxInt) != 0 {
7746			break
7747		}
7748		a := v_0
7749		if a.Op != OpAMD64ANDLconst {
7750			break
7751		}
7752		c := auxIntToInt32(a.AuxInt)
7753		x := a.Args[0]
7754		if !(a.Uses == 1) {
7755			break
7756		}
7757		v.reset(OpAMD64TESTWconst)
7758		v.AuxInt = int16ToAuxInt(int16(c))
7759		v.AddArg(x)
7760		return true
7761	}
7762	// match: (CMPWconst x [0])
7763	// result: (TESTW x x)
7764	for {
7765		if auxIntToInt16(v.AuxInt) != 0 {
7766			break
7767		}
7768		x := v_0
7769		v.reset(OpAMD64TESTW)
7770		v.AddArg2(x, x)
7771		return true
7772	}
7773	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
7774	// cond: l.Uses == 1 && clobber(l)
7775	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7776	for {
7777		c := auxIntToInt16(v.AuxInt)
7778		l := v_0
7779		if l.Op != OpAMD64MOVWload {
7780			break
7781		}
7782		off := auxIntToInt32(l.AuxInt)
7783		sym := auxToSym(l.Aux)
7784		mem := l.Args[1]
7785		ptr := l.Args[0]
7786		if !(l.Uses == 1 && clobber(l)) {
7787			break
7788		}
7789		b = l.Block
7790		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
7791		v.copyOf(v0)
7792		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7793		v0.Aux = symToAux(sym)
7794		v0.AddArg2(ptr, mem)
7795		return true
7796	}
7797	return false
7798}
7799func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
7800	v_1 := v.Args[1]
7801	v_0 := v.Args[0]
7802	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7803	// cond: ValAndOff(valoff1).canAdd32(off2)
7804	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7805	for {
7806		valoff1 := auxIntToValAndOff(v.AuxInt)
7807		sym := auxToSym(v.Aux)
7808		if v_0.Op != OpAMD64ADDQconst {
7809			break
7810		}
7811		off2 := auxIntToInt32(v_0.AuxInt)
7812		base := v_0.Args[0]
7813		mem := v_1
7814		if !(ValAndOff(valoff1).canAdd32(off2)) {
7815			break
7816		}
7817		v.reset(OpAMD64CMPWconstload)
7818		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7819		v.Aux = symToAux(sym)
7820		v.AddArg2(base, mem)
7821		return true
7822	}
7823	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7824	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7825	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7826	for {
7827		valoff1 := auxIntToValAndOff(v.AuxInt)
7828		sym1 := auxToSym(v.Aux)
7829		if v_0.Op != OpAMD64LEAQ {
7830			break
7831		}
7832		off2 := auxIntToInt32(v_0.AuxInt)
7833		sym2 := auxToSym(v_0.Aux)
7834		base := v_0.Args[0]
7835		mem := v_1
7836		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7837			break
7838		}
7839		v.reset(OpAMD64CMPWconstload)
7840		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7841		v.Aux = symToAux(mergeSym(sym1, sym2))
7842		v.AddArg2(base, mem)
7843		return true
7844	}
7845	return false
7846}
7847func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
7848	v_2 := v.Args[2]
7849	v_1 := v.Args[1]
7850	v_0 := v.Args[0]
7851	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
7852	// cond: is32Bit(int64(off1)+int64(off2))
7853	// result: (CMPWload [off1+off2] {sym} base val mem)
7854	for {
7855		off1 := auxIntToInt32(v.AuxInt)
7856		sym := auxToSym(v.Aux)
7857		if v_0.Op != OpAMD64ADDQconst {
7858			break
7859		}
7860		off2 := auxIntToInt32(v_0.AuxInt)
7861		base := v_0.Args[0]
7862		val := v_1
7863		mem := v_2
7864		if !(is32Bit(int64(off1) + int64(off2))) {
7865			break
7866		}
7867		v.reset(OpAMD64CMPWload)
7868		v.AuxInt = int32ToAuxInt(off1 + off2)
7869		v.Aux = symToAux(sym)
7870		v.AddArg3(base, val, mem)
7871		return true
7872	}
7873	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7874	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7875	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7876	for {
7877		off1 := auxIntToInt32(v.AuxInt)
7878		sym1 := auxToSym(v.Aux)
7879		if v_0.Op != OpAMD64LEAQ {
7880			break
7881		}
7882		off2 := auxIntToInt32(v_0.AuxInt)
7883		sym2 := auxToSym(v_0.Aux)
7884		base := v_0.Args[0]
7885		val := v_1
7886		mem := v_2
7887		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7888			break
7889		}
7890		v.reset(OpAMD64CMPWload)
7891		v.AuxInt = int32ToAuxInt(off1 + off2)
7892		v.Aux = symToAux(mergeSym(sym1, sym2))
7893		v.AddArg3(base, val, mem)
7894		return true
7895	}
7896	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
7897	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
7898	for {
7899		off := auxIntToInt32(v.AuxInt)
7900		sym := auxToSym(v.Aux)
7901		ptr := v_0
7902		if v_1.Op != OpAMD64MOVLconst {
7903			break
7904		}
7905		c := auxIntToInt32(v_1.AuxInt)
7906		mem := v_2
7907		v.reset(OpAMD64CMPWconstload)
7908		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
7909		v.Aux = symToAux(sym)
7910		v.AddArg2(ptr, mem)
7911		return true
7912	}
7913	return false
7914}
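// rewriteValueAMD64_OpAMD64CMPXCHGLlock folds an ADDQconst pointer
// adjustment into the offset of a CMPXCHGLlock when the sum fits in 32
// bits; CMPXCHGQlock gets the identical rewrite.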
7915func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
7916	v_3 := v.Args[3]
7917	v_2 := v.Args[2]
7918	v_1 := v.Args[1]
7919	v_0 := v.Args[0]
7920	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
7921	// cond: is32Bit(int64(off1)+int64(off2))
7922	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
7923	for {
7924		off1 := auxIntToInt32(v.AuxInt)
7925		sym := auxToSym(v.Aux)
7926		if v_0.Op != OpAMD64ADDQconst {
7927			break
7928		}
7929		off2 := auxIntToInt32(v_0.AuxInt)
7930		ptr := v_0.Args[0]
7931		old := v_1
7932		new_ := v_2
7933		mem := v_3
7934		if !(is32Bit(int64(off1) + int64(off2))) {
7935			break
7936		}
7937		v.reset(OpAMD64CMPXCHGLlock)
7938		v.AuxInt = int32ToAuxInt(off1 + off2)
7939		v.Aux = symToAux(sym)
7940		v.AddArg4(ptr, old, new_, mem)
7941		return true
7942	}
7943	return false
7944}
7945func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
7946	v_3 := v.Args[3]
7947	v_2 := v.Args[2]
7948	v_1 := v.Args[1]
7949	v_0 := v.Args[0]
7950	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
7951	// cond: is32Bit(int64(off1)+int64(off2))
7952	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
7953	for {
7954		off1 := auxIntToInt32(v.AuxInt)
7955		sym := auxToSym(v.Aux)
7956		if v_0.Op != OpAMD64ADDQconst {
7957			break
7958		}
7959		off2 := auxIntToInt32(v_0.AuxInt)
7960		ptr := v_0.Args[0]
7961		old := v_1
7962		new_ := v_2
7963		mem := v_3
7964		if !(is32Bit(int64(off1) + int64(off2))) {
7965			break
7966		}
7967		v.reset(OpAMD64CMPXCHGQlock)
7968		v.AuxInt = int32ToAuxInt(off1 + off2)
7969		v.Aux = symToAux(sym)
7970		v.AddArg4(ptr, old, new_, mem)
7971		return true
7972	}
7973	return false
7974}
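// rewriteValueAMD64_OpAMD64DIVSD merges a single-use MOVSDload divisor into
// a DIVSDload, replacing a separate load and divide with one memory-operand
// instruction; DIVSS does the same for single precision.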
7975func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
7976	v_1 := v.Args[1]
7977	v_0 := v.Args[0]
7978	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
7979	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
7980	// result: (DIVSDload x [off] {sym} ptr mem)
7981	for {
7982		x := v_0
7983		l := v_1
7984		if l.Op != OpAMD64MOVSDload {
7985			break
7986		}
7987		off := auxIntToInt32(l.AuxInt)
7988		sym := auxToSym(l.Aux)
7989		mem := l.Args[1]
7990		ptr := l.Args[0]
7991		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
7992			break
7993		}
7994		v.reset(OpAMD64DIVSDload)
7995		v.AuxInt = int32ToAuxInt(off)
7996		v.Aux = symToAux(sym)
7997		v.AddArg3(x, ptr, mem)
7998		return true
7999	}
8000	return false
8001}
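// rewriteValueAMD64_OpAMD64DIVSDload folds ADDQconst and LEAQ address
// computations into the offset and symbol of a DIVSDload; DIVSSload is
// handled identically.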
8002func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8003	v_2 := v.Args[2]
8004	v_1 := v.Args[1]
8005	v_0 := v.Args[0]
8006	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
8007	// cond: is32Bit(int64(off1)+int64(off2))
8008	// result: (DIVSDload [off1+off2] {sym} val base mem)
8009	for {
8010		off1 := auxIntToInt32(v.AuxInt)
8011		sym := auxToSym(v.Aux)
8012		val := v_0
8013		if v_1.Op != OpAMD64ADDQconst {
8014			break
8015		}
8016		off2 := auxIntToInt32(v_1.AuxInt)
8017		base := v_1.Args[0]
8018		mem := v_2
8019		if !(is32Bit(int64(off1) + int64(off2))) {
8020			break
8021		}
8022		v.reset(OpAMD64DIVSDload)
8023		v.AuxInt = int32ToAuxInt(off1 + off2)
8024		v.Aux = symToAux(sym)
8025		v.AddArg3(val, base, mem)
8026		return true
8027	}
8028	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8029	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8030	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8031	for {
8032		off1 := auxIntToInt32(v.AuxInt)
8033		sym1 := auxToSym(v.Aux)
8034		val := v_0
8035		if v_1.Op != OpAMD64LEAQ {
8036			break
8037		}
8038		off2 := auxIntToInt32(v_1.AuxInt)
8039		sym2 := auxToSym(v_1.Aux)
8040		base := v_1.Args[0]
8041		mem := v_2
8042		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8043			break
8044		}
8045		v.reset(OpAMD64DIVSDload)
8046		v.AuxInt = int32ToAuxInt(off1 + off2)
8047		v.Aux = symToAux(mergeSym(sym1, sym2))
8048		v.AddArg3(val, base, mem)
8049		return true
8050	}
8051	return false
8052}
8053func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8054	v_1 := v.Args[1]
8055	v_0 := v.Args[0]
8056	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
8057	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
8058	// result: (DIVSSload x [off] {sym} ptr mem)
8059	for {
8060		x := v_0
8061		l := v_1
8062		if l.Op != OpAMD64MOVSSload {
8063			break
8064		}
8065		off := auxIntToInt32(l.AuxInt)
8066		sym := auxToSym(l.Aux)
8067		mem := l.Args[1]
8068		ptr := l.Args[0]
8069		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8070			break
8071		}
8072		v.reset(OpAMD64DIVSSload)
8073		v.AuxInt = int32ToAuxInt(off)
8074		v.Aux = symToAux(sym)
8075		v.AddArg3(x, ptr, mem)
8076		return true
8077	}
8078	return false
8079}
8080func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8081	v_2 := v.Args[2]
8082	v_1 := v.Args[1]
8083	v_0 := v.Args[0]
8084	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
8085	// cond: is32Bit(int64(off1)+int64(off2))
8086	// result: (DIVSSload [off1+off2] {sym} val base mem)
8087	for {
8088		off1 := auxIntToInt32(v.AuxInt)
8089		sym := auxToSym(v.Aux)
8090		val := v_0
8091		if v_1.Op != OpAMD64ADDQconst {
8092			break
8093		}
8094		off2 := auxIntToInt32(v_1.AuxInt)
8095		base := v_1.Args[0]
8096		mem := v_2
8097		if !(is32Bit(int64(off1) + int64(off2))) {
8098			break
8099		}
8100		v.reset(OpAMD64DIVSSload)
8101		v.AuxInt = int32ToAuxInt(off1 + off2)
8102		v.Aux = symToAux(sym)
8103		v.AddArg3(val, base, mem)
8104		return true
8105	}
8106	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8107	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8108	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8109	for {
8110		off1 := auxIntToInt32(v.AuxInt)
8111		sym1 := auxToSym(v.Aux)
8112		val := v_0
8113		if v_1.Op != OpAMD64LEAQ {
8114			break
8115		}
8116		off2 := auxIntToInt32(v_1.AuxInt)
8117		sym2 := auxToSym(v_1.Aux)
8118		base := v_1.Args[0]
8119		mem := v_2
8120		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8121			break
8122		}
8123		v.reset(OpAMD64DIVSSload)
8124		v.AuxInt = int32ToAuxInt(off1 + off2)
8125		v.Aux = symToAux(mergeSym(sym1, sym2))
8126		v.AddArg3(val, base, mem)
8127		return true
8128	}
8129	return false
8130}
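// rewriteValueAMD64_OpAMD64HMULL commutes HMULL so that a rematerializeable
// operand comes first. HMULL's first argument is constrained to AX, so
// preferring a rematerializeable value there tends to reduce register
// shuffling; HMULLU, HMULQ, and HMULQU get the same rewrite.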
8131func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8132	v_1 := v.Args[1]
8133	v_0 := v.Args[0]
8134	// match: (HMULL x y)
8135	// cond: !x.rematerializeable() && y.rematerializeable()
8136	// result: (HMULL y x)
8137	for {
8138		x := v_0
8139		y := v_1
8140		if !(!x.rematerializeable() && y.rematerializeable()) {
8141			break
8142		}
8143		v.reset(OpAMD64HMULL)
8144		v.AddArg2(y, x)
8145		return true
8146	}
8147	return false
8148}
8149func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8150	v_1 := v.Args[1]
8151	v_0 := v.Args[0]
8152	// match: (HMULLU x y)
8153	// cond: !x.rematerializeable() && y.rematerializeable()
8154	// result: (HMULLU y x)
8155	for {
8156		x := v_0
8157		y := v_1
8158		if !(!x.rematerializeable() && y.rematerializeable()) {
8159			break
8160		}
8161		v.reset(OpAMD64HMULLU)
8162		v.AddArg2(y, x)
8163		return true
8164	}
8165	return false
8166}
8167func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8168	v_1 := v.Args[1]
8169	v_0 := v.Args[0]
8170	// match: (HMULQ x y)
8171	// cond: !x.rematerializeable() && y.rematerializeable()
8172	// result: (HMULQ y x)
8173	for {
8174		x := v_0
8175		y := v_1
8176		if !(!x.rematerializeable() && y.rematerializeable()) {
8177			break
8178		}
8179		v.reset(OpAMD64HMULQ)
8180		v.AddArg2(y, x)
8181		return true
8182	}
8183	return false
8184}
8185func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8186	v_1 := v.Args[1]
8187	v_0 := v.Args[0]
8188	// match: (HMULQU x y)
8189	// cond: !x.rematerializeable() && y.rematerializeable()
8190	// result: (HMULQU y x)
8191	for {
8192		x := v_0
8193		y := v_1
8194		if !(!x.rematerializeable() && y.rematerializeable()) {
8195			break
8196		}
8197		v.reset(OpAMD64HMULQU)
8198		v.AddArg2(y, x)
8199		return true
8200	}
8201	return false
8202}
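// rewriteValueAMD64_OpAMD64LEAL folds an ADDLconst into the LEAL
// displacement and turns (LEAL [c] (ADDL x y)) into the two-register form
// LEAL1 when neither addend is the static base pointer SB.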
8203func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8204	v_0 := v.Args[0]
8205	// match: (LEAL [c] {s} (ADDLconst [d] x))
8206	// cond: is32Bit(int64(c)+int64(d))
8207	// result: (LEAL [c+d] {s} x)
8208	for {
8209		c := auxIntToInt32(v.AuxInt)
8210		s := auxToSym(v.Aux)
8211		if v_0.Op != OpAMD64ADDLconst {
8212			break
8213		}
8214		d := auxIntToInt32(v_0.AuxInt)
8215		x := v_0.Args[0]
8216		if !(is32Bit(int64(c) + int64(d))) {
8217			break
8218		}
8219		v.reset(OpAMD64LEAL)
8220		v.AuxInt = int32ToAuxInt(c + d)
8221		v.Aux = symToAux(s)
8222		v.AddArg(x)
8223		return true
8224	}
8225	// match: (LEAL [c] {s} (ADDL x y))
8226	// cond: x.Op != OpSB && y.Op != OpSB
8227	// result: (LEAL1 [c] {s} x y)
8228	for {
8229		c := auxIntToInt32(v.AuxInt)
8230		s := auxToSym(v.Aux)
8231		if v_0.Op != OpAMD64ADDL {
8232			break
8233		}
8234		_ = v_0.Args[1]
8235		v_0_0 := v_0.Args[0]
8236		v_0_1 := v_0.Args[1]
8237		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8238			x := v_0_0
8239			y := v_0_1
8240			if !(x.Op != OpSB && y.Op != OpSB) {
8241				continue
8242			}
8243			v.reset(OpAMD64LEAL1)
8244			v.AuxInt = int32ToAuxInt(c)
8245			v.Aux = symToAux(s)
8246			v.AddArg2(x, y)
8247			return true
8248		}
8249		break
8250	}
8251	return false
8252}
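// rewriteValueAMD64_OpAMD64LEAL1 folds an ADDLconst on either operand into
// the displacement and upgrades an index shifted left by 1, 2, or 3 into
// the scaled forms LEAL2, LEAL4, and LEAL8; the LEAL2/LEAL4/LEAL8 functions
// below additionally multiply a constant folded from the index operand by
// their scale factor.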
8253func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8254	v_1 := v.Args[1]
8255	v_0 := v.Args[0]
8256	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
8257	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8258	// result: (LEAL1 [c+d] {s} x y)
8259	for {
8260		c := auxIntToInt32(v.AuxInt)
8261		s := auxToSym(v.Aux)
8262		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8263			if v_0.Op != OpAMD64ADDLconst {
8264				continue
8265			}
8266			d := auxIntToInt32(v_0.AuxInt)
8267			x := v_0.Args[0]
8268			y := v_1
8269			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8270				continue
8271			}
8272			v.reset(OpAMD64LEAL1)
8273			v.AuxInt = int32ToAuxInt(c + d)
8274			v.Aux = symToAux(s)
8275			v.AddArg2(x, y)
8276			return true
8277		}
8278		break
8279	}
8280	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
8281	// result: (LEAL2 [c] {s} x y)
8282	for {
8283		c := auxIntToInt32(v.AuxInt)
8284		s := auxToSym(v.Aux)
8285		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8286			x := v_0
8287			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8288				continue
8289			}
8290			y := v_1.Args[0]
8291			v.reset(OpAMD64LEAL2)
8292			v.AuxInt = int32ToAuxInt(c)
8293			v.Aux = symToAux(s)
8294			v.AddArg2(x, y)
8295			return true
8296		}
8297		break
8298	}
8299	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
8300	// result: (LEAL4 [c] {s} x y)
8301	for {
8302		c := auxIntToInt32(v.AuxInt)
8303		s := auxToSym(v.Aux)
8304		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8305			x := v_0
8306			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8307				continue
8308			}
8309			y := v_1.Args[0]
8310			v.reset(OpAMD64LEAL4)
8311			v.AuxInt = int32ToAuxInt(c)
8312			v.Aux = symToAux(s)
8313			v.AddArg2(x, y)
8314			return true
8315		}
8316		break
8317	}
8318	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
8319	// result: (LEAL8 [c] {s} x y)
8320	for {
8321		c := auxIntToInt32(v.AuxInt)
8322		s := auxToSym(v.Aux)
8323		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8324			x := v_0
8325			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8326				continue
8327			}
8328			y := v_1.Args[0]
8329			v.reset(OpAMD64LEAL8)
8330			v.AuxInt = int32ToAuxInt(c)
8331			v.Aux = symToAux(s)
8332			v.AddArg2(x, y)
8333			return true
8334		}
8335		break
8336	}
8337	return false
8338}
8339func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8340	v_1 := v.Args[1]
8341	v_0 := v.Args[0]
8342	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
8343	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8344	// result: (LEAL2 [c+d] {s} x y)
8345	for {
8346		c := auxIntToInt32(v.AuxInt)
8347		s := auxToSym(v.Aux)
8348		if v_0.Op != OpAMD64ADDLconst {
8349			break
8350		}
8351		d := auxIntToInt32(v_0.AuxInt)
8352		x := v_0.Args[0]
8353		y := v_1
8354		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8355			break
8356		}
8357		v.reset(OpAMD64LEAL2)
8358		v.AuxInt = int32ToAuxInt(c + d)
8359		v.Aux = symToAux(s)
8360		v.AddArg2(x, y)
8361		return true
8362	}
8363	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
8364	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
8365	// result: (LEAL2 [c+2*d] {s} x y)
8366	for {
8367		c := auxIntToInt32(v.AuxInt)
8368		s := auxToSym(v.Aux)
8369		x := v_0
8370		if v_1.Op != OpAMD64ADDLconst {
8371			break
8372		}
8373		d := auxIntToInt32(v_1.AuxInt)
8374		y := v_1.Args[0]
8375		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8376			break
8377		}
8378		v.reset(OpAMD64LEAL2)
8379		v.AuxInt = int32ToAuxInt(c + 2*d)
8380		v.Aux = symToAux(s)
8381		v.AddArg2(x, y)
8382		return true
8383	}
8384	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
8385	// result: (LEAL4 [c] {s} x y)
8386	for {
8387		c := auxIntToInt32(v.AuxInt)
8388		s := auxToSym(v.Aux)
8389		x := v_0
8390		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8391			break
8392		}
8393		y := v_1.Args[0]
8394		v.reset(OpAMD64LEAL4)
8395		v.AuxInt = int32ToAuxInt(c)
8396		v.Aux = symToAux(s)
8397		v.AddArg2(x, y)
8398		return true
8399	}
8400	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
8401	// result: (LEAL8 [c] {s} x y)
8402	for {
8403		c := auxIntToInt32(v.AuxInt)
8404		s := auxToSym(v.Aux)
8405		x := v_0
8406		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8407			break
8408		}
8409		y := v_1.Args[0]
8410		v.reset(OpAMD64LEAL8)
8411		v.AuxInt = int32ToAuxInt(c)
8412		v.Aux = symToAux(s)
8413		v.AddArg2(x, y)
8414		return true
8415	}
8416	return false
8417}
8418func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
8419	v_1 := v.Args[1]
8420	v_0 := v.Args[0]
8421	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
8422	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8423	// result: (LEAL4 [c+d] {s} x y)
8424	for {
8425		c := auxIntToInt32(v.AuxInt)
8426		s := auxToSym(v.Aux)
8427		if v_0.Op != OpAMD64ADDLconst {
8428			break
8429		}
8430		d := auxIntToInt32(v_0.AuxInt)
8431		x := v_0.Args[0]
8432		y := v_1
8433		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8434			break
8435		}
8436		v.reset(OpAMD64LEAL4)
8437		v.AuxInt = int32ToAuxInt(c + d)
8438		v.Aux = symToAux(s)
8439		v.AddArg2(x, y)
8440		return true
8441	}
8442	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
8443	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
8444	// result: (LEAL4 [c+4*d] {s} x y)
8445	for {
8446		c := auxIntToInt32(v.AuxInt)
8447		s := auxToSym(v.Aux)
8448		x := v_0
8449		if v_1.Op != OpAMD64ADDLconst {
8450			break
8451		}
8452		d := auxIntToInt32(v_1.AuxInt)
8453		y := v_1.Args[0]
8454		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
8455			break
8456		}
8457		v.reset(OpAMD64LEAL4)
8458		v.AuxInt = int32ToAuxInt(c + 4*d)
8459		v.Aux = symToAux(s)
8460		v.AddArg2(x, y)
8461		return true
8462	}
8463	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
8464	// result: (LEAL8 [c] {s} x y)
8465	for {
8466		c := auxIntToInt32(v.AuxInt)
8467		s := auxToSym(v.Aux)
8468		x := v_0
8469		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8470			break
8471		}
8472		y := v_1.Args[0]
8473		v.reset(OpAMD64LEAL8)
8474		v.AuxInt = int32ToAuxInt(c)
8475		v.Aux = symToAux(s)
8476		v.AddArg2(x, y)
8477		return true
8478	}
8479	return false
8480}
8481func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
8482	v_1 := v.Args[1]
8483	v_0 := v.Args[0]
8484	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
8485	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8486	// result: (LEAL8 [c+d] {s} x y)
8487	for {
8488		c := auxIntToInt32(v.AuxInt)
8489		s := auxToSym(v.Aux)
8490		if v_0.Op != OpAMD64ADDLconst {
8491			break
8492		}
8493		d := auxIntToInt32(v_0.AuxInt)
8494		x := v_0.Args[0]
8495		y := v_1
8496		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8497			break
8498		}
8499		v.reset(OpAMD64LEAL8)
8500		v.AuxInt = int32ToAuxInt(c + d)
8501		v.Aux = symToAux(s)
8502		v.AddArg2(x, y)
8503		return true
8504	}
8505	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
8506	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
8507	// result: (LEAL8 [c+8*d] {s} x y)
8508	for {
8509		c := auxIntToInt32(v.AuxInt)
8510		s := auxToSym(v.Aux)
8511		x := v_0
8512		if v_1.Op != OpAMD64ADDLconst {
8513			break
8514		}
8515		d := auxIntToInt32(v_1.AuxInt)
8516		y := v_1.Args[0]
8517		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
8518			break
8519		}
8520		v.reset(OpAMD64LEAL8)
8521		v.AuxInt = int32ToAuxInt(c + 8*d)
8522		v.Aux = symToAux(s)
8523		v.AddArg2(x, y)
8524		return true
8525	}
8526	return false
8527}
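// rewriteValueAMD64_OpAMD64LEAQ folds an ADDQconst into the displacement,
// turns (LEAQ [c] (ADDQ x y)) into LEAQ1 when neither operand is SB, and
// collapses a nested LEAQ/LEAQ1/LEAQ2/LEAQ4/LEAQ8 by summing offsets and
// merging symbols when the combined offset fits in 32 bits.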
8528func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
8529	v_0 := v.Args[0]
8530	// match: (LEAQ [c] {s} (ADDQconst [d] x))
8531	// cond: is32Bit(int64(c)+int64(d))
8532	// result: (LEAQ [c+d] {s} x)
8533	for {
8534		c := auxIntToInt32(v.AuxInt)
8535		s := auxToSym(v.Aux)
8536		if v_0.Op != OpAMD64ADDQconst {
8537			break
8538		}
8539		d := auxIntToInt32(v_0.AuxInt)
8540		x := v_0.Args[0]
8541		if !(is32Bit(int64(c) + int64(d))) {
8542			break
8543		}
8544		v.reset(OpAMD64LEAQ)
8545		v.AuxInt = int32ToAuxInt(c + d)
8546		v.Aux = symToAux(s)
8547		v.AddArg(x)
8548		return true
8549	}
8550	// match: (LEAQ [c] {s} (ADDQ x y))
8551	// cond: x.Op != OpSB && y.Op != OpSB
8552	// result: (LEAQ1 [c] {s} x y)
8553	for {
8554		c := auxIntToInt32(v.AuxInt)
8555		s := auxToSym(v.Aux)
8556		if v_0.Op != OpAMD64ADDQ {
8557			break
8558		}
8559		_ = v_0.Args[1]
8560		v_0_0 := v_0.Args[0]
8561		v_0_1 := v_0.Args[1]
8562		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8563			x := v_0_0
8564			y := v_0_1
8565			if !(x.Op != OpSB && y.Op != OpSB) {
8566				continue
8567			}
8568			v.reset(OpAMD64LEAQ1)
8569			v.AuxInt = int32ToAuxInt(c)
8570			v.Aux = symToAux(s)
8571			v.AddArg2(x, y)
8572			return true
8573		}
8574		break
8575	}
8576	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
8577	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8578	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
8579	for {
8580		off1 := auxIntToInt32(v.AuxInt)
8581		sym1 := auxToSym(v.Aux)
8582		if v_0.Op != OpAMD64LEAQ {
8583			break
8584		}
8585		off2 := auxIntToInt32(v_0.AuxInt)
8586		sym2 := auxToSym(v_0.Aux)
8587		x := v_0.Args[0]
8588		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8589			break
8590		}
8591		v.reset(OpAMD64LEAQ)
8592		v.AuxInt = int32ToAuxInt(off1 + off2)
8593		v.Aux = symToAux(mergeSym(sym1, sym2))
8594		v.AddArg(x)
8595		return true
8596	}
8597	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
8598	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8599	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
8600	for {
8601		off1 := auxIntToInt32(v.AuxInt)
8602		sym1 := auxToSym(v.Aux)
8603		if v_0.Op != OpAMD64LEAQ1 {
8604			break
8605		}
8606		off2 := auxIntToInt32(v_0.AuxInt)
8607		sym2 := auxToSym(v_0.Aux)
8608		y := v_0.Args[1]
8609		x := v_0.Args[0]
8610		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8611			break
8612		}
8613		v.reset(OpAMD64LEAQ1)
8614		v.AuxInt = int32ToAuxInt(off1 + off2)
8615		v.Aux = symToAux(mergeSym(sym1, sym2))
8616		v.AddArg2(x, y)
8617		return true
8618	}
8619	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
8620	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8621	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
8622	for {
8623		off1 := auxIntToInt32(v.AuxInt)
8624		sym1 := auxToSym(v.Aux)
8625		if v_0.Op != OpAMD64LEAQ2 {
8626			break
8627		}
8628		off2 := auxIntToInt32(v_0.AuxInt)
8629		sym2 := auxToSym(v_0.Aux)
8630		y := v_0.Args[1]
8631		x := v_0.Args[0]
8632		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8633			break
8634		}
8635		v.reset(OpAMD64LEAQ2)
8636		v.AuxInt = int32ToAuxInt(off1 + off2)
8637		v.Aux = symToAux(mergeSym(sym1, sym2))
8638		v.AddArg2(x, y)
8639		return true
8640	}
8641	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
8642	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8643	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
8644	for {
8645		off1 := auxIntToInt32(v.AuxInt)
8646		sym1 := auxToSym(v.Aux)
8647		if v_0.Op != OpAMD64LEAQ4 {
8648			break
8649		}
8650		off2 := auxIntToInt32(v_0.AuxInt)
8651		sym2 := auxToSym(v_0.Aux)
8652		y := v_0.Args[1]
8653		x := v_0.Args[0]
8654		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8655			break
8656		}
8657		v.reset(OpAMD64LEAQ4)
8658		v.AuxInt = int32ToAuxInt(off1 + off2)
8659		v.Aux = symToAux(mergeSym(sym1, sym2))
8660		v.AddArg2(x, y)
8661		return true
8662	}
8663	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
8664	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8665	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
8666	for {
8667		off1 := auxIntToInt32(v.AuxInt)
8668		sym1 := auxToSym(v.Aux)
8669		if v_0.Op != OpAMD64LEAQ8 {
8670			break
8671		}
8672		off2 := auxIntToInt32(v_0.AuxInt)
8673		sym2 := auxToSym(v_0.Aux)
8674		y := v_0.Args[1]
8675		x := v_0.Args[0]
8676		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8677			break
8678		}
8679		v.reset(OpAMD64LEAQ8)
8680		v.AuxInt = int32ToAuxInt(off1 + off2)
8681		v.Aux = symToAux(mergeSym(sym1, sym2))
8682		v.AddArg2(x, y)
8683		return true
8684	}
8685	return false
8686}
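// rewriteValueAMD64_OpAMD64LEAQ1 folds constants from either operand into
// the displacement, upgrades a SHLQconst index to LEAQ2/LEAQ4/LEAQ8,
// absorbs a nested LEAQ or LEAQ1, and degrades (LEAQ1 [0] x y) with no
// symbol into a plain ADDQ.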
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
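	// Editor's note: in the next rule, the inner (LEAQ1 [off2] {sym2} y y)
	// evaluates to 2*y + off2, so the whole expression is
	// off1 + x + (2*y + off2) = (off1+off2) + x + 2*y — exactly a LEAQ2.
	// The rule after it handles the variant where the inner LEAQ1 shares x:
	// off1 + x + (x + y + off2) = (off1+off2) + y + 2*x, hence a LEAQ2 with
	// the arguments swapped to (y, x).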
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
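// Editor's note: in LEAQ2 the second operand is scaled by 2, so folding an
// ADDQconst [d] out of the *index* multiplies the constant:
// c + x + 2*(y+d) = (c+2*d) + x + 2*y. That is why the second rule below
// checks is32Bit(c+2*d) rather than is32Bit(c+d). LEAQ2 is not commutative,
// so these matchers do not use the argument-swapping loop seen in LEAQ1.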
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
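	// Editor's note: when the index is itself a constant, the scaled-index
	// form is unnecessary. LEAQ2 [off] x (MOVQconst [k]) addresses
	// off + x + 2*k, which collapses to a plain LEAQ with displacement
	// off + 2*k, provided the sum still fits in a signed 32-bit
	// displacement. (The variable below is named scale, but it holds the
	// constant index value, not the scale factor.)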
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
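	// Editor's note: as with the analogous LEAQ2 rule, the inner
	// (LEAQ1 [off2] y y) is 2*y + off2, so under a LEAQ4 the whole
	// expression is off1 + x + 4*(off2 + 2*y) = (off1+4*off2) + x + 8*y,
	// a LEAQ8. sym2 == nil is required here, presumably because a
	// symbol-relative offset cannot be multiplied by the scale.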
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
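// Editor's note: the MOVBEx ops store with a byte swap (the MOVBE
// extension). Storing a value that was itself byte-swapped — BSWAPL or
// BSWAPQ, or ROLWconst [8] for the 16-bit case — lets the two swaps cancel
// into a plain store. The x.Uses == 1 condition ensures the swapped value
// has no other consumers, so dropping the swap is safe.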
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1
	// result: (MOVLstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1
	// result: (MOVQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
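// Editor's note: the extension ops below fuse a sign/zero extension with
// the load that feeds it. `x.Uses == 1 && clobber(x)` means the load's only
// use is this extension; clobber marks x invalid (it always returns true,
// so it can sit inside the condition) and the dead load is then removed.
// The `@x.Block` result form materializes the new extending load in the
// block of the original load rather than the block of the extension, which
// is what the `b = x.Block` reassignment below implements.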
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
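// Editor's note: the first rule below is store-to-load forwarding. A load
// from the same address as an immediately preceding store (matching sym and
// off, with isSamePtr confirming the pointers are provably equal) can reuse
// the stored register value, needing only the sign-extension.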
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
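	// Editor's note: a load relative to SB (the static base pseudo-register)
	// from a read-only symbol (symIsRO) addresses immutable data known at
	// compile time, so read8 fetches the byte from the symbol's data and the
	// load folds to a constant.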
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
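// Editor's note: a byte store of a SETcc flag-materialization fuses into
// the corresponding SETccstore, writing the condition result straight to
// memory instead of materializing it in a register first. One rule per
// condition code follows; all require y.Uses == 1 so the boolean is not
// needed elsewhere.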
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
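	// Editor's note: a byte store only writes the low 8 bits of its source,
	// so a MOVBQSX or MOVBQZX feeding the store is redundant and the
	// unextended value can be stored directly.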
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
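// Editor's note: MOVBstoreconst carries a ValAndOff in AuxInt — a constant
// value and a displacement packed into a single int64. canAdd32 checks that
// the adjusted displacement still fits; addOffset32 rebuilds the pair with
// the new offset while keeping the value half unchanged.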
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
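// Editor's note: MOVLf2i and MOVLi2f reinterpret 32 bits between the
// integer and floating-point register classes. When the operand is a
// function argument of the same size, the reinterpretation can be folded
// away by re-declaring the Arg with the new type; Arg values live in the
// function's entry block, hence the @b.Func.Entry result form below.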
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
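	// Editor's note: loading 32 integer bits from an address that was just
	// written by a float store (MOVSSstore to the same ptr/off/sym) is a
	// bit-level reinterpretation, so the load can forward the stored value
	// through MOVLf2i and skip the round trip through memory.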
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
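// Editor's note: the MOVLstore rules below fuse read-modify-write
// sequences. A store of an op whose operand is a load from the same
// address — and the same memory state, checked via mem == v_2 — becomes a
// single memory-operand instruction such as ADDLmodify. Both the op (y)
// and the load (l) must be single-use and are clobbered once fused.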
10633func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
10634	v_2 := v.Args[2]
10635	v_1 := v.Args[1]
10636	v_0 := v.Args[0]
10637	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
10638	// result: (MOVLstore [off] {sym} ptr x mem)
10639	for {
10640		off := auxIntToInt32(v.AuxInt)
10641		sym := auxToSym(v.Aux)
10642		ptr := v_0
10643		if v_1.Op != OpAMD64MOVLQSX {
10644			break
10645		}
10646		x := v_1.Args[0]
10647		mem := v_2
10648		v.reset(OpAMD64MOVLstore)
10649		v.AuxInt = int32ToAuxInt(off)
10650		v.Aux = symToAux(sym)
10651		v.AddArg3(ptr, x, mem)
10652		return true
10653	}
10654	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
10655	// result: (MOVLstore [off] {sym} ptr x mem)
10656	for {
10657		off := auxIntToInt32(v.AuxInt)
10658		sym := auxToSym(v.Aux)
10659		ptr := v_0
10660		if v_1.Op != OpAMD64MOVLQZX {
10661			break
10662		}
10663		x := v_1.Args[0]
10664		mem := v_2
10665		v.reset(OpAMD64MOVLstore)
10666		v.AuxInt = int32ToAuxInt(off)
10667		v.Aux = symToAux(sym)
10668		v.AddArg3(ptr, x, mem)
10669		return true
10670	}
10671	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
10672	// cond: is32Bit(int64(off1)+int64(off2))
10673	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
10674	for {
10675		off1 := auxIntToInt32(v.AuxInt)
10676		sym := auxToSym(v.Aux)
10677		if v_0.Op != OpAMD64ADDQconst {
10678			break
10679		}
10680		off2 := auxIntToInt32(v_0.AuxInt)
10681		ptr := v_0.Args[0]
10682		val := v_1
10683		mem := v_2
10684		if !(is32Bit(int64(off1) + int64(off2))) {
10685			break
10686		}
10687		v.reset(OpAMD64MOVLstore)
10688		v.AuxInt = int32ToAuxInt(off1 + off2)
10689		v.Aux = symToAux(sym)
10690		v.AddArg3(ptr, val, mem)
10691		return true
10692	}
10693	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
10694	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
10695	for {
10696		off := auxIntToInt32(v.AuxInt)
10697		sym := auxToSym(v.Aux)
10698		ptr := v_0
10699		if v_1.Op != OpAMD64MOVLconst {
10700			break
10701		}
10702		c := auxIntToInt32(v_1.AuxInt)
10703		mem := v_2
10704		v.reset(OpAMD64MOVLstoreconst)
10705		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10706		v.Aux = symToAux(sym)
10707		v.AddArg2(ptr, mem)
10708		return true
10709	}
10710	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
10711	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
10712	for {
10713		off := auxIntToInt32(v.AuxInt)
10714		sym := auxToSym(v.Aux)
10715		ptr := v_0
10716		if v_1.Op != OpAMD64MOVQconst {
10717			break
10718		}
10719		c := auxIntToInt64(v_1.AuxInt)
10720		mem := v_2
10721		v.reset(OpAMD64MOVLstoreconst)
10722		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10723		v.Aux = symToAux(sym)
10724		v.AddArg2(ptr, mem)
10725		return true
10726	}
10727	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
10728	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10729	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
10730	for {
10731		off1 := auxIntToInt32(v.AuxInt)
10732		sym1 := auxToSym(v.Aux)
10733		if v_0.Op != OpAMD64LEAQ {
10734			break
10735		}
10736		off2 := auxIntToInt32(v_0.AuxInt)
10737		sym2 := auxToSym(v_0.Aux)
10738		base := v_0.Args[0]
10739		val := v_1
10740		mem := v_2
10741		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10742			break
10743		}
10744		v.reset(OpAMD64MOVLstore)
10745		v.AuxInt = int32ToAuxInt(off1 + off2)
10746		v.Aux = symToAux(mergeSym(sym1, sym2))
10747		v.AddArg3(base, val, mem)
10748		return true
10749	}
10750	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
10751	// cond: y.Uses==1 && clobber(y)
10752	// result: (ADDLmodify [off] {sym} ptr x mem)
10753	for {
10754		off := auxIntToInt32(v.AuxInt)
10755		sym := auxToSym(v.Aux)
10756		ptr := v_0
10757		y := v_1
10758		if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10759			break
10760		}
10761		mem := y.Args[2]
10762		x := y.Args[0]
10763		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10764			break
10765		}
10766		v.reset(OpAMD64ADDLmodify)
10767		v.AuxInt = int32ToAuxInt(off)
10768		v.Aux = symToAux(sym)
10769		v.AddArg3(ptr, x, mem)
10770		return true
10771	}
10772	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
10773	// cond: y.Uses==1 && clobber(y)
10774	// result: (ANDLmodify [off] {sym} ptr x mem)
10775	for {
10776		off := auxIntToInt32(v.AuxInt)
10777		sym := auxToSym(v.Aux)
10778		ptr := v_0
10779		y := v_1
10780		if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10781			break
10782		}
10783		mem := y.Args[2]
10784		x := y.Args[0]
10785		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10786			break
10787		}
10788		v.reset(OpAMD64ANDLmodify)
10789		v.AuxInt = int32ToAuxInt(off)
10790		v.Aux = symToAux(sym)
10791		v.AddArg3(ptr, x, mem)
10792		return true
10793	}
10794	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
10795	// cond: y.Uses==1 && clobber(y)
10796	// result: (ORLmodify [off] {sym} ptr x mem)
10797	for {
10798		off := auxIntToInt32(v.AuxInt)
10799		sym := auxToSym(v.Aux)
10800		ptr := v_0
10801		y := v_1
10802		if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10803			break
10804		}
10805		mem := y.Args[2]
10806		x := y.Args[0]
10807		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10808			break
10809		}
10810		v.reset(OpAMD64ORLmodify)
10811		v.AuxInt = int32ToAuxInt(off)
10812		v.Aux = symToAux(sym)
10813		v.AddArg3(ptr, x, mem)
10814		return true
10815	}
10816	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
10817	// cond: y.Uses==1 && clobber(y)
10818	// result: (XORLmodify [off] {sym} ptr x mem)
10819	for {
10820		off := auxIntToInt32(v.AuxInt)
10821		sym := auxToSym(v.Aux)
10822		ptr := v_0
10823		y := v_1
10824		if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10825			break
10826		}
10827		mem := y.Args[2]
10828		x := y.Args[0]
10829		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10830			break
10831		}
10832		v.reset(OpAMD64XORLmodify)
10833		v.AuxInt = int32ToAuxInt(off)
10834		v.Aux = symToAux(sym)
10835		v.AddArg3(ptr, x, mem)
10836		return true
10837	}
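	// The four rules above fuse a store back into a load-op instruction when
	// the op's load and the store hit the same [off]{sym} address: a value
	// computed by ADDLload/ANDLload/ORLload/XORLload and immediately stored
	// to the same location collapses into the corresponding read-modify-write
	// op (ADDLmodify etc.). The y.Uses==1 plus clobber(y) conditions
	// guarantee the intermediate result is not needed anywhere else.
	// Illustrative sketch, not part of the generated rules:
	//
	//	func addToMem(p *int32, x int32) {
	//		*p += x // load, add, store fuse into a single ADDL x, (p)
	//	}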
10838	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
10839	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
10840	// result: (ADDLmodify [off] {sym} ptr x mem)
10841	for {
10842		off := auxIntToInt32(v.AuxInt)
10843		sym := auxToSym(v.Aux)
10844		ptr := v_0
10845		y := v_1
10846		if y.Op != OpAMD64ADDL {
10847			break
10848		}
10849		_ = y.Args[1]
10850		y_0 := y.Args[0]
10851		y_1 := y.Args[1]
10852		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10853			l := y_0
10854			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10855				continue
10856			}
10857			mem := l.Args[1]
10858			if ptr != l.Args[0] {
10859				continue
10860			}
10861			x := y_1
10862			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10863				continue
10864			}
10865			v.reset(OpAMD64ADDLmodify)
10866			v.AuxInt = int32ToAuxInt(off)
10867			v.Aux = symToAux(sym)
10868			v.AddArg3(ptr, x, mem)
10869			return true
10870		}
10871		break
10872	}
10873	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
10874	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
10875	// result: (SUBLmodify [off] {sym} ptr x mem)
10876	for {
10877		off := auxIntToInt32(v.AuxInt)
10878		sym := auxToSym(v.Aux)
10879		ptr := v_0
10880		y := v_1
10881		if y.Op != OpAMD64SUBL {
10882			break
10883		}
10884		x := y.Args[1]
10885		l := y.Args[0]
10886		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10887			break
10888		}
10889		mem := l.Args[1]
10890		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10891			break
10892		}
10893		v.reset(OpAMD64SUBLmodify)
10894		v.AuxInt = int32ToAuxInt(off)
10895		v.Aux = symToAux(sym)
10896		v.AddArg3(ptr, x, mem)
10897		return true
10898	}
10899	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
10900	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
10901	// result: (ANDLmodify [off] {sym} ptr x mem)
10902	for {
10903		off := auxIntToInt32(v.AuxInt)
10904		sym := auxToSym(v.Aux)
10905		ptr := v_0
10906		y := v_1
10907		if y.Op != OpAMD64ANDL {
10908			break
10909		}
10910		_ = y.Args[1]
10911		y_0 := y.Args[0]
10912		y_1 := y.Args[1]
10913		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10914			l := y_0
10915			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10916				continue
10917			}
10918			mem := l.Args[1]
10919			if ptr != l.Args[0] {
10920				continue
10921			}
10922			x := y_1
10923			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10924				continue
10925			}
10926			v.reset(OpAMD64ANDLmodify)
10927			v.AuxInt = int32ToAuxInt(off)
10928			v.Aux = symToAux(sym)
10929			v.AddArg3(ptr, x, mem)
10930			return true
10931		}
10932		break
10933	}
10934	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
10935	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
10936	// result: (ORLmodify [off] {sym} ptr x mem)
10937	for {
10938		off := auxIntToInt32(v.AuxInt)
10939		sym := auxToSym(v.Aux)
10940		ptr := v_0
10941		y := v_1
10942		if y.Op != OpAMD64ORL {
10943			break
10944		}
10945		_ = y.Args[1]
10946		y_0 := y.Args[0]
10947		y_1 := y.Args[1]
10948		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10949			l := y_0
10950			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10951				continue
10952			}
10953			mem := l.Args[1]
10954			if ptr != l.Args[0] {
10955				continue
10956			}
10957			x := y_1
10958			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10959				continue
10960			}
10961			v.reset(OpAMD64ORLmodify)
10962			v.AuxInt = int32ToAuxInt(off)
10963			v.Aux = symToAux(sym)
10964			v.AddArg3(ptr, x, mem)
10965			return true
10966		}
10967		break
10968	}
10969	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
10970	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
10971	// result: (XORLmodify [off] {sym} ptr x mem)
10972	for {
10973		off := auxIntToInt32(v.AuxInt)
10974		sym := auxToSym(v.Aux)
10975		ptr := v_0
10976		y := v_1
10977		if y.Op != OpAMD64XORL {
10978			break
10979		}
10980		_ = y.Args[1]
10981		y_0 := y.Args[0]
10982		y_1 := y.Args[1]
10983		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10984			l := y_0
10985			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10986				continue
10987			}
10988			mem := l.Args[1]
10989			if ptr != l.Args[0] {
10990				continue
10991			}
10992			x := y_1
10993			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10994				continue
10995			}
10996			v.reset(OpAMD64XORLmodify)
10997			v.AuxInt = int32ToAuxInt(off)
10998			v.Aux = symToAux(sym)
10999			v.AddArg3(ptr, x, mem)
11000			return true
11001		}
11002		break
11003	}
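	// The five rules above reach the same ...modify fusion one step earlier,
	// before the op has merged with its load. Because ADDL/ANDL/ORL/XORL are
	// commutative, the generated two-iteration loop
	// (_i0, y_0, y_1 = _i0+1, y_1, y_0) tries the MOVLload in either operand
	// position; SUBL, being non-commutative, is matched without the loop and
	// only with the load as its first operand.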
11004	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11005	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11006	// result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11007	for {
11008		off := auxIntToInt32(v.AuxInt)
11009		sym := auxToSym(v.Aux)
11010		ptr := v_0
11011		a := v_1
11012		if a.Op != OpAMD64ADDLconst {
11013			break
11014		}
11015		c := auxIntToInt32(a.AuxInt)
11016		l := a.Args[0]
11017		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11018			break
11019		}
11020		mem := l.Args[1]
11021		ptr2 := l.Args[0]
11022		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11023			break
11024		}
11025		v.reset(OpAMD64ADDLconstmodify)
11026		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11027		v.Aux = symToAux(sym)
11028		v.AddArg2(ptr, mem)
11029		return true
11030	}
11031	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11032	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11033	// result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11034	for {
11035		off := auxIntToInt32(v.AuxInt)
11036		sym := auxToSym(v.Aux)
11037		ptr := v_0
11038		a := v_1
11039		if a.Op != OpAMD64ANDLconst {
11040			break
11041		}
11042		c := auxIntToInt32(a.AuxInt)
11043		l := a.Args[0]
11044		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11045			break
11046		}
11047		mem := l.Args[1]
11048		ptr2 := l.Args[0]
11049		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11050			break
11051		}
11052		v.reset(OpAMD64ANDLconstmodify)
11053		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11054		v.Aux = symToAux(sym)
11055		v.AddArg2(ptr, mem)
11056		return true
11057	}
11058	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11059	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11060	// result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11061	for {
11062		off := auxIntToInt32(v.AuxInt)
11063		sym := auxToSym(v.Aux)
11064		ptr := v_0
11065		a := v_1
11066		if a.Op != OpAMD64ORLconst {
11067			break
11068		}
11069		c := auxIntToInt32(a.AuxInt)
11070		l := a.Args[0]
11071		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11072			break
11073		}
11074		mem := l.Args[1]
11075		ptr2 := l.Args[0]
11076		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11077			break
11078		}
11079		v.reset(OpAMD64ORLconstmodify)
11080		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11081		v.Aux = symToAux(sym)
11082		v.AddArg2(ptr, mem)
11083		return true
11084	}
11085	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
11086	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11087	// result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11088	for {
11089		off := auxIntToInt32(v.AuxInt)
11090		sym := auxToSym(v.Aux)
11091		ptr := v_0
11092		a := v_1
11093		if a.Op != OpAMD64XORLconst {
11094			break
11095		}
11096		c := auxIntToInt32(a.AuxInt)
11097		l := a.Args[0]
11098		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11099			break
11100		}
11101		mem := l.Args[1]
11102		ptr2 := l.Args[0]
11103		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11104			break
11105		}
11106		v.reset(OpAMD64XORLconstmodify)
11107		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11108		v.Aux = symToAux(sym)
11109		v.AddArg2(ptr, mem)
11110		return true
11111	}
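	// The ...const variants above handle an op with an immediate: a load,
	// op-with-constant, store round trip on one address becomes a single
	// memory-destination instruction with the immediate and offset packed
	// into one ValAndOff (e.g. ADDLconstmodify emits ADDL $c, off(ptr)).
	// isSamePtr lets the matcher accept two syntactically distinct but
	// provably identical pointers. Illustrative sketch, not part of the
	// generated rules:
	//
	//	func bumpCounter(p *int32) {
	//		*p += 4 // emitted as ADDL $4, (p)
	//	}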
11112	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
11113	// result: (MOVSSstore [off] {sym} ptr val mem)
11114	for {
11115		off := auxIntToInt32(v.AuxInt)
11116		sym := auxToSym(v.Aux)
11117		ptr := v_0
11118		if v_1.Op != OpAMD64MOVLf2i {
11119			break
11120		}
11121		val := v_1.Args[0]
11122		mem := v_2
11123		v.reset(OpAMD64MOVSSstore)
11124		v.AuxInt = int32ToAuxInt(off)
11125		v.Aux = symToAux(sym)
11126		v.AddArg3(ptr, val, mem)
11127		return true
11128	}
11129	// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
11130	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
11131	// result: (MOVBELstore [i] {s} p w mem)
11132	for {
11133		i := auxIntToInt32(v.AuxInt)
11134		s := auxToSym(v.Aux)
11135		p := v_0
11136		x := v_1
11137		if x.Op != OpAMD64BSWAPL {
11138			break
11139		}
11140		w := x.Args[0]
11141		mem := v_2
11142		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
11143			break
11144		}
11145		v.reset(OpAMD64MOVBELstore)
11146		v.AuxInt = int32ToAuxInt(i)
11147		v.Aux = symToAux(s)
11148		v.AddArg3(p, w, mem)
11149		return true
11150	}
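	// The rule above recognizes a byte-swapped store and, when the target
	// guarantees MOVBE (GOAMD64 >= 3, i.e. x86-64-v3), emits a single
	// MOVBEL store instead of BSWAPL followed by MOVL. This is the shape
	// typically produced by big-endian encoding on a little-endian machine.
	// Illustrative sketch, not part of the generated rules (assumes
	// encoding/binary and that the stores have already combined into a
	// BSWAPL of a MOVLstore):
	//
	//	func putBE32(b []byte, x uint32) {
	//		binary.BigEndian.PutUint32(b, x) // BSWAPL+MOVLstore fuse to MOVBEL
	//	}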
11151	return false
11152}
11153func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
11154	v_1 := v.Args[1]
11155	v_0 := v.Args[0]
11156	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
11157	// cond: ValAndOff(sc).canAdd32(off)
11158	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
11159	for {
11160		sc := auxIntToValAndOff(v.AuxInt)
11161		s := auxToSym(v.Aux)
11162		if v_0.Op != OpAMD64ADDQconst {
11163			break
11164		}
11165		off := auxIntToInt32(v_0.AuxInt)
11166		ptr := v_0.Args[0]
11167		mem := v_1
11168		if !(ValAndOff(sc).canAdd32(off)) {
11169			break
11170		}
11171		v.reset(OpAMD64MOVLstoreconst)
11172		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11173		v.Aux = symToAux(s)
11174		v.AddArg2(ptr, mem)
11175		return true
11176	}
11177	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
11178	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
11179	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11180	for {
11181		sc := auxIntToValAndOff(v.AuxInt)
11182		sym1 := auxToSym(v.Aux)
11183		if v_0.Op != OpAMD64LEAQ {
11184			break
11185		}
11186		off := auxIntToInt32(v_0.AuxInt)
11187		sym2 := auxToSym(v_0.Aux)
11188		ptr := v_0.Args[0]
11189		mem := v_1
11190		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11191			break
11192		}
11193		v.reset(OpAMD64MOVLstoreconst)
11194		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11195		v.Aux = symToAux(mergeSym(sym1, sym2))
11196		v.AddArg2(ptr, mem)
11197		return true
11198	}
11199	return false
11200}
11201func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
11202	v_1 := v.Args[1]
11203	v_0 := v.Args[0]
11204	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
11205	// cond: is32Bit(int64(off1)+int64(off2))
11206	// result: (MOVOload [off1+off2] {sym} ptr mem)
11207	for {
11208		off1 := auxIntToInt32(v.AuxInt)
11209		sym := auxToSym(v.Aux)
11210		if v_0.Op != OpAMD64ADDQconst {
11211			break
11212		}
11213		off2 := auxIntToInt32(v_0.AuxInt)
11214		ptr := v_0.Args[0]
11215		mem := v_1
11216		if !(is32Bit(int64(off1) + int64(off2))) {
11217			break
11218		}
11219		v.reset(OpAMD64MOVOload)
11220		v.AuxInt = int32ToAuxInt(off1 + off2)
11221		v.Aux = symToAux(sym)
11222		v.AddArg2(ptr, mem)
11223		return true
11224	}
11225	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11226	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11227	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11228	for {
11229		off1 := auxIntToInt32(v.AuxInt)
11230		sym1 := auxToSym(v.Aux)
11231		if v_0.Op != OpAMD64LEAQ {
11232			break
11233		}
11234		off2 := auxIntToInt32(v_0.AuxInt)
11235		sym2 := auxToSym(v_0.Aux)
11236		base := v_0.Args[0]
11237		mem := v_1
11238		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11239			break
11240		}
11241		v.reset(OpAMD64MOVOload)
11242		v.AuxInt = int32ToAuxInt(off1 + off2)
11243		v.Aux = symToAux(mergeSym(sym1, sym2))
11244		v.AddArg2(base, mem)
11245		return true
11246	}
11247	return false
11248}
11249func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
11250	v_2 := v.Args[2]
11251	v_1 := v.Args[1]
11252	v_0 := v.Args[0]
11253	b := v.Block
11254	config := b.Func.Config
11255	typ := &b.Func.Config.Types
11256	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
11257	// cond: is32Bit(int64(off1)+int64(off2))
11258	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
11259	for {
11260		off1 := auxIntToInt32(v.AuxInt)
11261		sym := auxToSym(v.Aux)
11262		if v_0.Op != OpAMD64ADDQconst {
11263			break
11264		}
11265		off2 := auxIntToInt32(v_0.AuxInt)
11266		ptr := v_0.Args[0]
11267		val := v_1
11268		mem := v_2
11269		if !(is32Bit(int64(off1) + int64(off2))) {
11270			break
11271		}
11272		v.reset(OpAMD64MOVOstore)
11273		v.AuxInt = int32ToAuxInt(off1 + off2)
11274		v.Aux = symToAux(sym)
11275		v.AddArg3(ptr, val, mem)
11276		return true
11277	}
11278	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
11279	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11280	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
11281	for {
11282		off1 := auxIntToInt32(v.AuxInt)
11283		sym1 := auxToSym(v.Aux)
11284		if v_0.Op != OpAMD64LEAQ {
11285			break
11286		}
11287		off2 := auxIntToInt32(v_0.AuxInt)
11288		sym2 := auxToSym(v_0.Aux)
11289		base := v_0.Args[0]
11290		val := v_1
11291		mem := v_2
11292		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11293			break
11294		}
11295		v.reset(OpAMD64MOVOstore)
11296		v.AuxInt = int32ToAuxInt(off1 + off2)
11297		v.Aux = symToAux(mergeSym(sym1, sym2))
11298		v.AddArg3(base, val, mem)
11299		return true
11300	}
11301	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
11302	// cond: symIsRO(srcSym)
11303	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
11304	for {
11305		dstOff := auxIntToInt32(v.AuxInt)
11306		dstSym := auxToSym(v.Aux)
11307		ptr := v_0
11308		if v_1.Op != OpAMD64MOVOload {
11309			break
11310		}
11311		srcOff := auxIntToInt32(v_1.AuxInt)
11312		srcSym := auxToSym(v_1.Aux)
11313		v_1_0 := v_1.Args[0]
11314		if v_1_0.Op != OpSB {
11315			break
11316		}
11317		mem := v_2
11318		if !(symIsRO(srcSym)) {
11319			break
11320		}
11321		v.reset(OpAMD64MOVQstore)
11322		v.AuxInt = int32ToAuxInt(dstOff + 8)
11323		v.Aux = symToAux(dstSym)
11324		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11325		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
11326		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
11327		v1.AuxInt = int32ToAuxInt(dstOff)
11328		v1.Aux = symToAux(dstSym)
11329		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11330		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
11331		v1.AddArg3(ptr, v2, mem)
11332		v.AddArg3(ptr, v0, v1)
11333		return true
11334	}
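	// The rule above constant-folds a 16-byte copy whose source is a
	// read-only symbol: rather than loading through an X register, the
	// sixteen bytes are read from the object data at compile time (read64,
	// honoring the target byte order) and stored as two MOVQstores of
	// MOVQconst immediates, low eight bytes first. This is how small
	// constant blobs are materialized without touching rodata at run time.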
11335	return false
11336}
11337func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
11338	v_1 := v.Args[1]
11339	v_0 := v.Args[0]
11340	// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
11341	// cond: ValAndOff(sc).canAdd32(off)
11342	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
11343	for {
11344		sc := auxIntToValAndOff(v.AuxInt)
11345		s := auxToSym(v.Aux)
11346		if v_0.Op != OpAMD64ADDQconst {
11347			break
11348		}
11349		off := auxIntToInt32(v_0.AuxInt)
11350		ptr := v_0.Args[0]
11351		mem := v_1
11352		if !(ValAndOff(sc).canAdd32(off)) {
11353			break
11354		}
11355		v.reset(OpAMD64MOVOstoreconst)
11356		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11357		v.Aux = symToAux(s)
11358		v.AddArg2(ptr, mem)
11359		return true
11360	}
11361	// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
11362	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
11363	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11364	for {
11365		sc := auxIntToValAndOff(v.AuxInt)
11366		sym1 := auxToSym(v.Aux)
11367		if v_0.Op != OpAMD64LEAQ {
11368			break
11369		}
11370		off := auxIntToInt32(v_0.AuxInt)
11371		sym2 := auxToSym(v_0.Aux)
11372		ptr := v_0.Args[0]
11373		mem := v_1
11374		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11375			break
11376		}
11377		v.reset(OpAMD64MOVOstoreconst)
11378		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11379		v.Aux = symToAux(mergeSym(sym1, sym2))
11380		v.AddArg2(ptr, mem)
11381		return true
11382	}
11383	return false
11384}
11385func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
11386	v_1 := v.Args[1]
11387	v_0 := v.Args[0]
11388	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
11389	// cond: is32Bit(int64(off1)+int64(off2))
11390	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
11391	for {
11392		off1 := auxIntToInt32(v.AuxInt)
11393		sym := auxToSym(v.Aux)
11394		if v_0.Op != OpAMD64ADDQconst {
11395			break
11396		}
11397		off2 := auxIntToInt32(v_0.AuxInt)
11398		ptr := v_0.Args[0]
11399		mem := v_1
11400		if !(is32Bit(int64(off1) + int64(off2))) {
11401			break
11402		}
11403		v.reset(OpAMD64MOVQatomicload)
11404		v.AuxInt = int32ToAuxInt(off1 + off2)
11405		v.Aux = symToAux(sym)
11406		v.AddArg2(ptr, mem)
11407		return true
11408	}
11409	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
11410	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11411	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
11412	for {
11413		off1 := auxIntToInt32(v.AuxInt)
11414		sym1 := auxToSym(v.Aux)
11415		if v_0.Op != OpAMD64LEAQ {
11416			break
11417		}
11418		off2 := auxIntToInt32(v_0.AuxInt)
11419		sym2 := auxToSym(v_0.Aux)
11420		ptr := v_0.Args[0]
11421		mem := v_1
11422		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11423			break
11424		}
11425		v.reset(OpAMD64MOVQatomicload)
11426		v.AuxInt = int32ToAuxInt(off1 + off2)
11427		v.Aux = symToAux(mergeSym(sym1, sym2))
11428		v.AddArg2(ptr, mem)
11429		return true
11430	}
11431	return false
11432}
11433func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
11434	v_0 := v.Args[0]
11435	b := v.Block
11436	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
11437	// cond: t.Size() == u.Size()
11438	// result: @b.Func.Entry (Arg <t> [off] {sym})
11439	for {
11440		t := v.Type
11441		if v_0.Op != OpArg {
11442			break
11443		}
11444		u := v_0.Type
11445		off := auxIntToInt32(v_0.AuxInt)
11446		sym := auxToSym(v_0.Aux)
11447		if !(t.Size() == u.Size()) {
11448			break
11449		}
11450		b = b.Func.Entry
11451		v0 := b.NewValue0(v.Pos, OpArg, t)
11452		v.copyOf(v0)
11453		v0.AuxInt = int32ToAuxInt(off)
11454		v0.Aux = symToAux(sym)
11455		return true
11456	}
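	// The rule above handles a bitcast of an incoming argument: instead of
	// moving the value between register files, the Arg is re-created with
	// the new type in the entry block (@b.Func.Entry), so the argument is
	// loaded from its stack slot directly into the register class that
	// MOVQf2i's result wants.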
11457	return false
11458}
11459func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
11460	v_0 := v.Args[0]
11461	b := v.Block
11462	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
11463	// cond: t.Size() == u.Size()
11464	// result: @b.Func.Entry (Arg <t> [off] {sym})
11465	for {
11466		t := v.Type
11467		if v_0.Op != OpArg {
11468			break
11469		}
11470		u := v_0.Type
11471		off := auxIntToInt32(v_0.AuxInt)
11472		sym := auxToSym(v_0.Aux)
11473		if !(t.Size() == u.Size()) {
11474			break
11475		}
11476		b = b.Func.Entry
11477		v0 := b.NewValue0(v.Pos, OpArg, t)
11478		v.copyOf(v0)
11479		v0.AuxInt = int32ToAuxInt(off)
11480		v0.Aux = symToAux(sym)
11481		return true
11482	}
11483	return false
11484}
11485func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
11486	v_1 := v.Args[1]
11487	v_0 := v.Args[0]
11488	b := v.Block
11489	config := b.Func.Config
11490	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
11491	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
11492	// result: x
11493	for {
11494		off := auxIntToInt32(v.AuxInt)
11495		sym := auxToSym(v.Aux)
11496		ptr := v_0
11497		if v_1.Op != OpAMD64MOVQstore {
11498			break
11499		}
11500		off2 := auxIntToInt32(v_1.AuxInt)
11501		sym2 := auxToSym(v_1.Aux)
11502		x := v_1.Args[1]
11503		ptr2 := v_1.Args[0]
11504		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11505			break
11506		}
11507		v.copyOf(x)
11508		return true
11509	}
11510	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
11511	// cond: is32Bit(int64(off1)+int64(off2))
11512	// result: (MOVQload [off1+off2] {sym} ptr mem)
11513	for {
11514		off1 := auxIntToInt32(v.AuxInt)
11515		sym := auxToSym(v.Aux)
11516		if v_0.Op != OpAMD64ADDQconst {
11517			break
11518		}
11519		off2 := auxIntToInt32(v_0.AuxInt)
11520		ptr := v_0.Args[0]
11521		mem := v_1
11522		if !(is32Bit(int64(off1) + int64(off2))) {
11523			break
11524		}
11525		v.reset(OpAMD64MOVQload)
11526		v.AuxInt = int32ToAuxInt(off1 + off2)
11527		v.Aux = symToAux(sym)
11528		v.AddArg2(ptr, mem)
11529		return true
11530	}
11531	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11532	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11533	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11534	for {
11535		off1 := auxIntToInt32(v.AuxInt)
11536		sym1 := auxToSym(v.Aux)
11537		if v_0.Op != OpAMD64LEAQ {
11538			break
11539		}
11540		off2 := auxIntToInt32(v_0.AuxInt)
11541		sym2 := auxToSym(v_0.Aux)
11542		base := v_0.Args[0]
11543		mem := v_1
11544		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11545			break
11546		}
11547		v.reset(OpAMD64MOVQload)
11548		v.AuxInt = int32ToAuxInt(off1 + off2)
11549		v.Aux = symToAux(mergeSym(sym1, sym2))
11550		v.AddArg2(base, mem)
11551		return true
11552	}
11553	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
11554	// result: (MOVQf2i val)
11555	for {
11556		off := auxIntToInt32(v.AuxInt)
11557		sym := auxToSym(v.Aux)
11558		ptr := v_0
11559		if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
11560			break
11561		}
11562		val := v_1.Args[1]
11563		if ptr != v_1.Args[0] {
11564			break
11565		}
11566		v.reset(OpAMD64MOVQf2i)
11567		v.AddArg(val)
11568		return true
11569	}
11570	// match: (MOVQload [off] {sym} (SB) _)
11571	// cond: symIsRO(sym)
11572	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
11573	for {
11574		off := auxIntToInt32(v.AuxInt)
11575		sym := auxToSym(v.Aux)
11576		if v_0.Op != OpSB || !(symIsRO(sym)) {
11577			break
11578		}
11579		v.reset(OpAMD64MOVQconst)
11580		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
11581		return true
11582	}
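	// Two of the rules above are store-to-load forwarding: a MOVQload that
	// reads exactly what a MOVQstore just wrote is replaced by the stored
	// value (v.copyOf(x)), and a MOVQload over a MOVSDstore turns into a
	// direct MOVQf2i register move instead of a round trip through memory.
	// The final rule constant-folds loads from read-only data: symIsRO
	// means the bytes are known at compile time, so the load becomes a
	// MOVQconst via read64. Illustrative sketch of the forwarding case,
	// not part of the generated rules:
	//
	//	func roundTrip(p *int64, x int64) int64 {
	//		*p = x
	//		return *p // forwarded to x; the load disappears
	//	}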
11583	return false
11584}
11585func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
11586	v_2 := v.Args[2]
11587	v_1 := v.Args[1]
11588	v_0 := v.Args[0]
11589	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
11590	// cond: is32Bit(int64(off1)+int64(off2))
11591	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
11592	for {
11593		off1 := auxIntToInt32(v.AuxInt)
11594		sym := auxToSym(v.Aux)
11595		if v_0.Op != OpAMD64ADDQconst {
11596			break
11597		}
11598		off2 := auxIntToInt32(v_0.AuxInt)
11599		ptr := v_0.Args[0]
11600		val := v_1
11601		mem := v_2
11602		if !(is32Bit(int64(off1) + int64(off2))) {
11603			break
11604		}
11605		v.reset(OpAMD64MOVQstore)
11606		v.AuxInt = int32ToAuxInt(off1 + off2)
11607		v.Aux = symToAux(sym)
11608		v.AddArg3(ptr, val, mem)
11609		return true
11610	}
11611	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
11612	// cond: validVal(c)
11613	// result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
11614	for {
11615		off := auxIntToInt32(v.AuxInt)
11616		sym := auxToSym(v.Aux)
11617		ptr := v_0
11618		if v_1.Op != OpAMD64MOVQconst {
11619			break
11620		}
11621		c := auxIntToInt64(v_1.AuxInt)
11622		mem := v_2
11623		if !(validVal(c)) {
11624			break
11625		}
11626		v.reset(OpAMD64MOVQstoreconst)
11627		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11628		v.Aux = symToAux(sym)
11629		v.AddArg2(ptr, mem)
11630		return true
11631	}
11632	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
11633	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11634	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
11635	for {
11636		off1 := auxIntToInt32(v.AuxInt)
11637		sym1 := auxToSym(v.Aux)
11638		if v_0.Op != OpAMD64LEAQ {
11639			break
11640		}
11641		off2 := auxIntToInt32(v_0.AuxInt)
11642		sym2 := auxToSym(v_0.Aux)
11643		base := v_0.Args[0]
11644		val := v_1
11645		mem := v_2
11646		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11647			break
11648		}
11649		v.reset(OpAMD64MOVQstore)
11650		v.AuxInt = int32ToAuxInt(off1 + off2)
11651		v.Aux = symToAux(mergeSym(sym1, sym2))
11652		v.AddArg3(base, val, mem)
11653		return true
11654	}
11655	// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
11656	// cond: y.Uses==1 && clobber(y)
11657	// result: (ADDQmodify [off] {sym} ptr x mem)
11658	for {
11659		off := auxIntToInt32(v.AuxInt)
11660		sym := auxToSym(v.Aux)
11661		ptr := v_0
11662		y := v_1
11663		if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11664			break
11665		}
11666		mem := y.Args[2]
11667		x := y.Args[0]
11668		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11669			break
11670		}
11671		v.reset(OpAMD64ADDQmodify)
11672		v.AuxInt = int32ToAuxInt(off)
11673		v.Aux = symToAux(sym)
11674		v.AddArg3(ptr, x, mem)
11675		return true
11676	}
11677	// match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
11678	// cond: y.Uses==1 && clobber(y)
11679	// result: (ANDQmodify [off] {sym} ptr x mem)
11680	for {
11681		off := auxIntToInt32(v.AuxInt)
11682		sym := auxToSym(v.Aux)
11683		ptr := v_0
11684		y := v_1
11685		if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11686			break
11687		}
11688		mem := y.Args[2]
11689		x := y.Args[0]
11690		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11691			break
11692		}
11693		v.reset(OpAMD64ANDQmodify)
11694		v.AuxInt = int32ToAuxInt(off)
11695		v.Aux = symToAux(sym)
11696		v.AddArg3(ptr, x, mem)
11697		return true
11698	}
11699	// match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
11700	// cond: y.Uses==1 && clobber(y)
11701	// result: (ORQmodify [off] {sym} ptr x mem)
11702	for {
11703		off := auxIntToInt32(v.AuxInt)
11704		sym := auxToSym(v.Aux)
11705		ptr := v_0
11706		y := v_1
11707		if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11708			break
11709		}
11710		mem := y.Args[2]
11711		x := y.Args[0]
11712		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11713			break
11714		}
11715		v.reset(OpAMD64ORQmodify)
11716		v.AuxInt = int32ToAuxInt(off)
11717		v.Aux = symToAux(sym)
11718		v.AddArg3(ptr, x, mem)
11719		return true
11720	}
11721	// match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
11722	// cond: y.Uses==1 && clobber(y)
11723	// result: (XORQmodify [off] {sym} ptr x mem)
11724	for {
11725		off := auxIntToInt32(v.AuxInt)
11726		sym := auxToSym(v.Aux)
11727		ptr := v_0
11728		y := v_1
11729		if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11730			break
11731		}
11732		mem := y.Args[2]
11733		x := y.Args[0]
11734		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11735			break
11736		}
11737		v.reset(OpAMD64XORQmodify)
11738		v.AuxInt = int32ToAuxInt(off)
11739		v.Aux = symToAux(sym)
11740		v.AddArg3(ptr, x, mem)
11741		return true
11742	}
11743	// match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11744	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11745	// result: (ADDQmodify [off] {sym} ptr x mem)
11746	for {
11747		off := auxIntToInt32(v.AuxInt)
11748		sym := auxToSym(v.Aux)
11749		ptr := v_0
11750		y := v_1
11751		if y.Op != OpAMD64ADDQ {
11752			break
11753		}
11754		_ = y.Args[1]
11755		y_0 := y.Args[0]
11756		y_1 := y.Args[1]
11757		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11758			l := y_0
11759			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11760				continue
11761			}
11762			mem := l.Args[1]
11763			if ptr != l.Args[0] {
11764				continue
11765			}
11766			x := y_1
11767			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11768				continue
11769			}
11770			v.reset(OpAMD64ADDQmodify)
11771			v.AuxInt = int32ToAuxInt(off)
11772			v.Aux = symToAux(sym)
11773			v.AddArg3(ptr, x, mem)
11774			return true
11775		}
11776		break
11777	}
11778	// match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11779	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11780	// result: (SUBQmodify [off] {sym} ptr x mem)
11781	for {
11782		off := auxIntToInt32(v.AuxInt)
11783		sym := auxToSym(v.Aux)
11784		ptr := v_0
11785		y := v_1
11786		if y.Op != OpAMD64SUBQ {
11787			break
11788		}
11789		x := y.Args[1]
11790		l := y.Args[0]
11791		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11792			break
11793		}
11794		mem := l.Args[1]
11795		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11796			break
11797		}
11798		v.reset(OpAMD64SUBQmodify)
11799		v.AuxInt = int32ToAuxInt(off)
11800		v.Aux = symToAux(sym)
11801		v.AddArg3(ptr, x, mem)
11802		return true
11803	}
11804	// match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11805	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11806	// result: (ANDQmodify [off] {sym} ptr x mem)
11807	for {
11808		off := auxIntToInt32(v.AuxInt)
11809		sym := auxToSym(v.Aux)
11810		ptr := v_0
11811		y := v_1
11812		if y.Op != OpAMD64ANDQ {
11813			break
11814		}
11815		_ = y.Args[1]
11816		y_0 := y.Args[0]
11817		y_1 := y.Args[1]
11818		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11819			l := y_0
11820			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11821				continue
11822			}
11823			mem := l.Args[1]
11824			if ptr != l.Args[0] {
11825				continue
11826			}
11827			x := y_1
11828			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11829				continue
11830			}
11831			v.reset(OpAMD64ANDQmodify)
11832			v.AuxInt = int32ToAuxInt(off)
11833			v.Aux = symToAux(sym)
11834			v.AddArg3(ptr, x, mem)
11835			return true
11836		}
11837		break
11838	}
11839	// match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11840	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11841	// result: (ORQmodify [off] {sym} ptr x mem)
11842	for {
11843		off := auxIntToInt32(v.AuxInt)
11844		sym := auxToSym(v.Aux)
11845		ptr := v_0
11846		y := v_1
11847		if y.Op != OpAMD64ORQ {
11848			break
11849		}
11850		_ = y.Args[1]
11851		y_0 := y.Args[0]
11852		y_1 := y.Args[1]
11853		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11854			l := y_0
11855			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11856				continue
11857			}
11858			mem := l.Args[1]
11859			if ptr != l.Args[0] {
11860				continue
11861			}
11862			x := y_1
11863			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11864				continue
11865			}
11866			v.reset(OpAMD64ORQmodify)
11867			v.AuxInt = int32ToAuxInt(off)
11868			v.Aux = symToAux(sym)
11869			v.AddArg3(ptr, x, mem)
11870			return true
11871		}
11872		break
11873	}
11874	// match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
11875	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
11876	// result: (XORQmodify [off] {sym} ptr x mem)
11877	for {
11878		off := auxIntToInt32(v.AuxInt)
11879		sym := auxToSym(v.Aux)
11880		ptr := v_0
11881		y := v_1
11882		if y.Op != OpAMD64XORQ {
11883			break
11884		}
11885		_ = y.Args[1]
11886		y_0 := y.Args[0]
11887		y_1 := y.Args[1]
11888		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11889			l := y_0
11890			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11891				continue
11892			}
11893			mem := l.Args[1]
11894			if ptr != l.Args[0] {
11895				continue
11896			}
11897			x := y_1
11898			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11899				continue
11900			}
11901			v.reset(OpAMD64XORQmodify)
11902			v.AuxInt = int32ToAuxInt(off)
11903			v.Aux = symToAux(sym)
11904			v.AddArg3(ptr, x, mem)
11905			return true
11906		}
11907		break
11908	}
11909	// match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
11910	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
11911	// result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11912	for {
11913		off := auxIntToInt32(v.AuxInt)
11914		sym := auxToSym(v.Aux)
11915		ptr := v_0
11916		x := v_1
11917		if x.Op != OpAMD64BTSQconst {
11918			break
11919		}
11920		c := auxIntToInt8(x.AuxInt)
11921		l := x.Args[0]
11922		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11923			break
11924		}
11925		mem := l.Args[1]
11926		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11927			break
11928		}
11929		v.reset(OpAMD64BTSQconstmodify)
11930		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11931		v.Aux = symToAux(sym)
11932		v.AddArg2(ptr, mem)
11933		return true
11934	}
11935	// match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
11936	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
11937	// result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11938	for {
11939		off := auxIntToInt32(v.AuxInt)
11940		sym := auxToSym(v.Aux)
11941		ptr := v_0
11942		x := v_1
11943		if x.Op != OpAMD64BTRQconst {
11944			break
11945		}
11946		c := auxIntToInt8(x.AuxInt)
11947		l := x.Args[0]
11948		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11949			break
11950		}
11951		mem := l.Args[1]
11952		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11953			break
11954		}
11955		v.reset(OpAMD64BTRQconstmodify)
11956		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11957		v.Aux = symToAux(sym)
11958		v.AddArg2(ptr, mem)
11959		return true
11960	}
11961	// match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
11962	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
11963	// result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11964	for {
11965		off := auxIntToInt32(v.AuxInt)
11966		sym := auxToSym(v.Aux)
11967		ptr := v_0
11968		x := v_1
11969		if x.Op != OpAMD64BTCQconst {
11970			break
11971		}
11972		c := auxIntToInt8(x.AuxInt)
11973		l := x.Args[0]
11974		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11975			break
11976		}
11977		mem := l.Args[1]
11978		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
11979			break
11980		}
11981		v.reset(OpAMD64BTCQconstmodify)
11982		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11983		v.Aux = symToAux(sym)
11984		v.AddArg2(ptr, mem)
11985		return true
11986	}
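	// The three rules above fuse single-bit updates into memory-destination
	// bit instructions: set (BTS), reset (BTR), and complement (BTC) of bit
	// c at [off]{sym}(ptr) each become one ...constmodify op with the bit
	// index and offset packed into the ValAndOff. Illustrative sketch, not
	// part of the generated rules (the BT*Qconst forms arise for masks too
	// wide for an imm32, e.g. bit 40):
	//
	//	func setBit40(p *uint64) {
	//		*p |= 1 << 40 // emitted as BTSQ $40, (p)
	//	}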
11987	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
11988	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
11989	// result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
11990	for {
11991		off := auxIntToInt32(v.AuxInt)
11992		sym := auxToSym(v.Aux)
11993		ptr := v_0
11994		a := v_1
11995		if a.Op != OpAMD64ADDQconst {
11996			break
11997		}
11998		c := auxIntToInt32(a.AuxInt)
11999		l := a.Args[0]
12000		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12001			break
12002		}
12003		mem := l.Args[1]
12004		ptr2 := l.Args[0]
12005		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12006			break
12007		}
12008		v.reset(OpAMD64ADDQconstmodify)
12009		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12010		v.Aux = symToAux(sym)
12011		v.AddArg2(ptr, mem)
12012		return true
12013	}
12014	// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
12015	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12016	// result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12017	for {
12018		off := auxIntToInt32(v.AuxInt)
12019		sym := auxToSym(v.Aux)
12020		ptr := v_0
12021		a := v_1
12022		if a.Op != OpAMD64ANDQconst {
12023			break
12024		}
12025		c := auxIntToInt32(a.AuxInt)
12026		l := a.Args[0]
12027		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12028			break
12029		}
12030		mem := l.Args[1]
12031		ptr2 := l.Args[0]
12032		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12033			break
12034		}
12035		v.reset(OpAMD64ANDQconstmodify)
12036		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12037		v.Aux = symToAux(sym)
12038		v.AddArg2(ptr, mem)
12039		return true
12040	}
12041	// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
12042	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12043	// result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12044	for {
12045		off := auxIntToInt32(v.AuxInt)
12046		sym := auxToSym(v.Aux)
12047		ptr := v_0
12048		a := v_1
12049		if a.Op != OpAMD64ORQconst {
12050			break
12051		}
12052		c := auxIntToInt32(a.AuxInt)
12053		l := a.Args[0]
12054		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12055			break
12056		}
12057		mem := l.Args[1]
12058		ptr2 := l.Args[0]
12059		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12060			break
12061		}
12062		v.reset(OpAMD64ORQconstmodify)
12063		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12064		v.Aux = symToAux(sym)
12065		v.AddArg2(ptr, mem)
12066		return true
12067	}
12068	// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
12069	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12070	// result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12071	for {
12072		off := auxIntToInt32(v.AuxInt)
12073		sym := auxToSym(v.Aux)
12074		ptr := v_0
12075		a := v_1
12076		if a.Op != OpAMD64XORQconst {
12077			break
12078		}
12079		c := auxIntToInt32(a.AuxInt)
12080		l := a.Args[0]
12081		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12082			break
12083		}
12084		mem := l.Args[1]
12085		ptr2 := l.Args[0]
12086		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12087			break
12088		}
12089		v.reset(OpAMD64XORQconstmodify)
12090		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12091		v.Aux = symToAux(sym)
12092		v.AddArg2(ptr, mem)
12093		return true
12094	}
12095	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
12096	// result: (MOVSDstore [off] {sym} ptr val mem)
12097	for {
12098		off := auxIntToInt32(v.AuxInt)
12099		sym := auxToSym(v.Aux)
12100		ptr := v_0
12101		if v_1.Op != OpAMD64MOVQf2i {
12102			break
12103		}
12104		val := v_1.Args[0]
12105		mem := v_2
12106		v.reset(OpAMD64MOVSDstore)
12107		v.AuxInt = int32ToAuxInt(off)
12108		v.Aux = symToAux(sym)
12109		v.AddArg3(ptr, val, mem)
12110		return true
12111	}
12112	// match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
12113	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
12114	// result: (MOVBEQstore [i] {s} p w mem)
12115	for {
12116		i := auxIntToInt32(v.AuxInt)
12117		s := auxToSym(v.Aux)
12118		p := v_0
12119		x := v_1
12120		if x.Op != OpAMD64BSWAPQ {
12121			break
12122		}
12123		w := x.Args[0]
12124		mem := v_2
12125		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12126			break
12127		}
12128		v.reset(OpAMD64MOVBEQstore)
12129		v.AuxInt = int32ToAuxInt(i)
12130		v.Aux = symToAux(s)
12131		v.AddArg3(p, w, mem)
12132		return true
12133	}
12134	return false
12135}
12136func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
12137	v_1 := v.Args[1]
12138	v_0 := v.Args[0]
12139	b := v.Block
12140	config := b.Func.Config
12141	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
12142	// cond: ValAndOff(sc).canAdd32(off)
12143	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
12144	for {
12145		sc := auxIntToValAndOff(v.AuxInt)
12146		s := auxToSym(v.Aux)
12147		if v_0.Op != OpAMD64ADDQconst {
12148			break
12149		}
12150		off := auxIntToInt32(v_0.AuxInt)
12151		ptr := v_0.Args[0]
12152		mem := v_1
12153		if !(ValAndOff(sc).canAdd32(off)) {
12154			break
12155		}
12156		v.reset(OpAMD64MOVQstoreconst)
12157		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12158		v.Aux = symToAux(s)
12159		v.AddArg2(ptr, mem)
12160		return true
12161	}
12162	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
12163	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
12164	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
12165	for {
12166		sc := auxIntToValAndOff(v.AuxInt)
12167		sym1 := auxToSym(v.Aux)
12168		if v_0.Op != OpAMD64LEAQ {
12169			break
12170		}
12171		off := auxIntToInt32(v_0.AuxInt)
12172		sym2 := auxToSym(v_0.Aux)
12173		ptr := v_0.Args[0]
12174		mem := v_1
12175		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
12176			break
12177		}
12178		v.reset(OpAMD64MOVQstoreconst)
12179		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12180		v.Aux = symToAux(mergeSym(sym1, sym2))
12181		v.AddArg2(ptr, mem)
12182		return true
12183	}
12184	// match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
12185	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
12186	// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
12187	for {
12188		c := auxIntToValAndOff(v.AuxInt)
12189		s := auxToSym(v.Aux)
12190		p1 := v_0
12191		x := v_1
12192		if x.Op != OpAMD64MOVQstoreconst {
12193			break
12194		}
12195		a := auxIntToValAndOff(x.AuxInt)
12196		if auxToSym(x.Aux) != s {
12197			break
12198		}
12199		mem := x.Args[1]
12200		p0 := x.Args[0]
12201		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12202			break
12203		}
12204		v.reset(OpAMD64MOVOstoreconst)
12205		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12206		v.Aux = symToAux(s)
12207		v.AddArg2(p0, mem)
12208		return true
12209	}
12210	// match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
12211	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
12212	// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
12213	for {
12214		a := auxIntToValAndOff(v.AuxInt)
12215		s := auxToSym(v.Aux)
12216		p0 := v_0
12217		x := v_1
12218		if x.Op != OpAMD64MOVQstoreconst {
12219			break
12220		}
12221		c := auxIntToValAndOff(x.AuxInt)
12222		if auxToSym(x.Aux) != s {
12223			break
12224		}
12225		mem := x.Args[1]
12226		p1 := x.Args[0]
12227		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12228			break
12229		}
12230		v.reset(OpAMD64MOVOstoreconst)
12231		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12232		v.Aux = symToAux(s)
12233		v.AddArg2(p0, mem)
12234		return true
12235	}
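	// The two rules above (one per match order) merge a pair of adjacent
	// 8-byte zeroing stores into a single 16-byte MOVOstoreconst when SSE
	// is in use: sequentialAddresses proves p0 and p1 are exactly 8 bytes
	// apart, and the a.Val() == 0 && c.Val() == 0 conditions restrict the
	// merge to zeroing, the only 16-byte "immediate" an SSE store can
	// synthesize cheaply.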
12236	return false
12237}
12238func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
12239	v_1 := v.Args[1]
12240	v_0 := v.Args[0]
12241	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
12242	// cond: is32Bit(int64(off1)+int64(off2))
12243	// result: (MOVSDload [off1+off2] {sym} ptr mem)
12244	for {
12245		off1 := auxIntToInt32(v.AuxInt)
12246		sym := auxToSym(v.Aux)
12247		if v_0.Op != OpAMD64ADDQconst {
12248			break
12249		}
12250		off2 := auxIntToInt32(v_0.AuxInt)
12251		ptr := v_0.Args[0]
12252		mem := v_1
12253		if !(is32Bit(int64(off1) + int64(off2))) {
12254			break
12255		}
12256		v.reset(OpAMD64MOVSDload)
12257		v.AuxInt = int32ToAuxInt(off1 + off2)
12258		v.Aux = symToAux(sym)
12259		v.AddArg2(ptr, mem)
12260		return true
12261	}
12262	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12263	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12264	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12265	for {
12266		off1 := auxIntToInt32(v.AuxInt)
12267		sym1 := auxToSym(v.Aux)
12268		if v_0.Op != OpAMD64LEAQ {
12269			break
12270		}
12271		off2 := auxIntToInt32(v_0.AuxInt)
12272		sym2 := auxToSym(v_0.Aux)
12273		base := v_0.Args[0]
12274		mem := v_1
12275		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12276			break
12277		}
12278		v.reset(OpAMD64MOVSDload)
12279		v.AuxInt = int32ToAuxInt(off1 + off2)
12280		v.Aux = symToAux(mergeSym(sym1, sym2))
12281		v.AddArg2(base, mem)
12282		return true
12283	}
12284	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
12285	// result: (MOVQi2f val)
12286	for {
12287		off := auxIntToInt32(v.AuxInt)
12288		sym := auxToSym(v.Aux)
12289		ptr := v_0
12290		if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12291			break
12292		}
12293		val := v_1.Args[1]
12294		if ptr != v_1.Args[0] {
12295			break
12296		}
12297		v.reset(OpAMD64MOVQi2f)
12298		v.AddArg(val)
12299		return true
12300	}
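	// The rule above is the float side of store-to-load forwarding: reading
	// back a just-stored integer as a float64 becomes a direct GP-to-XMM
	// move (MOVQi2f) with no memory round trip. Illustrative sketch, not
	// part of the generated rules (assumes unsafe; this is the classic
	// bits-to-float punning shape):
	//
	//	func float64frombits(b uint64) float64 {
	//		return *(*float64)(unsafe.Pointer(&b)) // MOVQstore + MOVSDload fold to one MOVQ GP->XMM
	//	}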
12301	return false
12302}
12303func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
12304	v_2 := v.Args[2]
12305	v_1 := v.Args[1]
12306	v_0 := v.Args[0]
12307	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12308	// cond: is32Bit(int64(off1)+int64(off2))
12309	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
12310	for {
12311		off1 := auxIntToInt32(v.AuxInt)
12312		sym := auxToSym(v.Aux)
12313		if v_0.Op != OpAMD64ADDQconst {
12314			break
12315		}
12316		off2 := auxIntToInt32(v_0.AuxInt)
12317		ptr := v_0.Args[0]
12318		val := v_1
12319		mem := v_2
12320		if !(is32Bit(int64(off1) + int64(off2))) {
12321			break
12322		}
12323		v.reset(OpAMD64MOVSDstore)
12324		v.AuxInt = int32ToAuxInt(off1 + off2)
12325		v.Aux = symToAux(sym)
12326		v.AddArg3(ptr, val, mem)
12327		return true
12328	}
12329	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12330	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12331	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12332	for {
12333		off1 := auxIntToInt32(v.AuxInt)
12334		sym1 := auxToSym(v.Aux)
12335		if v_0.Op != OpAMD64LEAQ {
12336			break
12337		}
12338		off2 := auxIntToInt32(v_0.AuxInt)
12339		sym2 := auxToSym(v_0.Aux)
12340		base := v_0.Args[0]
12341		val := v_1
12342		mem := v_2
12343		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12344			break
12345		}
12346		v.reset(OpAMD64MOVSDstore)
12347		v.AuxInt = int32ToAuxInt(off1 + off2)
12348		v.Aux = symToAux(mergeSym(sym1, sym2))
12349		v.AddArg3(base, val, mem)
12350		return true
12351	}
12352	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
12353	// result: (MOVQstore [off] {sym} ptr val mem)
12354	for {
12355		off := auxIntToInt32(v.AuxInt)
12356		sym := auxToSym(v.Aux)
12357		ptr := v_0
12358		if v_1.Op != OpAMD64MOVQi2f {
12359			break
12360		}
12361		val := v_1.Args[0]
12362		mem := v_2
12363		v.reset(OpAMD64MOVQstore)
12364		v.AuxInt = int32ToAuxInt(off)
12365		v.Aux = symToAux(sym)
12366		v.AddArg3(ptr, val, mem)
12367		return true
12368	}
12369	return false
12370}
12371func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
12372	v_1 := v.Args[1]
12373	v_0 := v.Args[0]
12374	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
12375	// cond: is32Bit(int64(off1)+int64(off2))
12376	// result: (MOVSSload [off1+off2] {sym} ptr mem)
12377	for {
12378		off1 := auxIntToInt32(v.AuxInt)
12379		sym := auxToSym(v.Aux)
12380		if v_0.Op != OpAMD64ADDQconst {
12381			break
12382		}
12383		off2 := auxIntToInt32(v_0.AuxInt)
12384		ptr := v_0.Args[0]
12385		mem := v_1
12386		if !(is32Bit(int64(off1) + int64(off2))) {
12387			break
12388		}
12389		v.reset(OpAMD64MOVSSload)
12390		v.AuxInt = int32ToAuxInt(off1 + off2)
12391		v.Aux = symToAux(sym)
12392		v.AddArg2(ptr, mem)
12393		return true
12394	}
12395	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12396	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12397	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12398	for {
12399		off1 := auxIntToInt32(v.AuxInt)
12400		sym1 := auxToSym(v.Aux)
12401		if v_0.Op != OpAMD64LEAQ {
12402			break
12403		}
12404		off2 := auxIntToInt32(v_0.AuxInt)
12405		sym2 := auxToSym(v_0.Aux)
12406		base := v_0.Args[0]
12407		mem := v_1
12408		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12409			break
12410		}
12411		v.reset(OpAMD64MOVSSload)
12412		v.AuxInt = int32ToAuxInt(off1 + off2)
12413		v.Aux = symToAux(mergeSym(sym1, sym2))
12414		v.AddArg2(base, mem)
12415		return true
12416	}
12417	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
12418	// result: (MOVLi2f val)
12419	for {
12420		off := auxIntToInt32(v.AuxInt)
12421		sym := auxToSym(v.Aux)
12422		ptr := v_0
12423		if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12424			break
12425		}
12426		val := v_1.Args[1]
12427		if ptr != v_1.Args[0] {
12428			break
12429		}
12430		v.reset(OpAMD64MOVLi2f)
12431		v.AddArg(val)
12432		return true
12433	}
12434	return false
12435}
12436func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
12437	v_2 := v.Args[2]
12438	v_1 := v.Args[1]
12439	v_0 := v.Args[0]
12440	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12441	// cond: is32Bit(int64(off1)+int64(off2))
12442	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
12443	for {
12444		off1 := auxIntToInt32(v.AuxInt)
12445		sym := auxToSym(v.Aux)
12446		if v_0.Op != OpAMD64ADDQconst {
12447			break
12448		}
12449		off2 := auxIntToInt32(v_0.AuxInt)
12450		ptr := v_0.Args[0]
12451		val := v_1
12452		mem := v_2
12453		if !(is32Bit(int64(off1) + int64(off2))) {
12454			break
12455		}
12456		v.reset(OpAMD64MOVSSstore)
12457		v.AuxInt = int32ToAuxInt(off1 + off2)
12458		v.Aux = symToAux(sym)
12459		v.AddArg3(ptr, val, mem)
12460		return true
12461	}
12462	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12463	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12464	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12465	for {
12466		off1 := auxIntToInt32(v.AuxInt)
12467		sym1 := auxToSym(v.Aux)
12468		if v_0.Op != OpAMD64LEAQ {
12469			break
12470		}
12471		off2 := auxIntToInt32(v_0.AuxInt)
12472		sym2 := auxToSym(v_0.Aux)
12473		base := v_0.Args[0]
12474		val := v_1
12475		mem := v_2
12476		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12477			break
12478		}
12479		v.reset(OpAMD64MOVSSstore)
12480		v.AuxInt = int32ToAuxInt(off1 + off2)
12481		v.Aux = symToAux(mergeSym(sym1, sym2))
12482		v.AddArg3(base, val, mem)
12483		return true
12484	}
12485	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
12486	// result: (MOVLstore [off] {sym} ptr val mem)
12487	for {
12488		off := auxIntToInt32(v.AuxInt)
12489		sym := auxToSym(v.Aux)
12490		ptr := v_0
12491		if v_1.Op != OpAMD64MOVLi2f {
12492			break
12493		}
12494		val := v_1.Args[0]
12495		mem := v_2
12496		v.reset(OpAMD64MOVLstore)
12497		v.AuxInt = int32ToAuxInt(off)
12498		v.Aux = symToAux(sym)
12499		v.AddArg3(ptr, val, mem)
12500		return true
12501	}
12502	return false
12503}
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
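// The first MOVWQSXload rule below is store-to-load forwarding: a load
// that reads back exactly the word just stored through the same pointer
// can instead sign-extend the stored value in a register.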
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
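	// A load at a constant offset from SB addresses global data. When the
	// symbol is read-only, its bytes are known at compile time, so read16
	// folds the two loaded bytes into a constant, honoring the target's
	// byte order.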
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
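	// A constant word store keeps only the low 16 bits of the constant:
	// int32(int16(c)) truncates before makeValAndOff packs the value and
	// the store offset into the single AuxInt of MOVWstoreconst.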
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
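	// (ROLWconst [8] w) byte-swaps a 16-bit value. GOAMD64 >= 3 guarantees
	// the MOVBE extension, so the swap and the store fuse into a single
	// MOVBEWstore (store with byte swap).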
	// match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBEWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBEWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
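// MOVWstoreconst carries a ValAndOff auxiliary: a constant value and a
// store offset packed into one AuxInt. canAdd32 checks that folding in
// an extra offset still fits; addOffset32 performs the addition.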
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
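// MULL is commutative, so the matcher tries both argument orders: the
// inner loop below swaps v_0 and v_1 on its second iteration.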
func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULL x (MOVLconst [c]))
	// result: (MULLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64MULLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
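// The MULLconst rules strength-reduce multiplication by a constant into
// LEA, shift, and negate sequences. LEAL1/LEAL2/LEAL4/LEAL8 x y compute
// x+y, x+2*y, x+4*y, and x+8*y. For example, the rule for [27] builds
// t = (LEAL2 x x) = 3*x and then (LEAL8 t t) = t + 8*t = 27*x.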
func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULLconst [c] (MULLconst [d] x))
	// result: (MULLconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [-9] x)
	// result: (NEGL (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-5] x)
	// result: (NEGL (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-3] x)
	// result: (NEGL (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-1] x)
	// result: (NEGL x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 0] _)
	// result: (MOVLconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (MULLconst [ 1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULLconst [ 3] x)
	// result: (LEAL2 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULLconst [ 5] x)
	// result: (LEAL4 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULLconst [ 7] x)
	// result: (LEAL2 x (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [ 9] x)
	// result: (LEAL8 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULLconst [11] x)
	// result: (LEAL2 x (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [13] x)
	// result: (LEAL4 x (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [19] x)
	// result: (LEAL2 x (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [21] x)
	// result: (LEAL4 x (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [25] x)
	// result: (LEAL8 x (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [27] x)
	// result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULLconst [37] x)
	// result: (LEAL4 x (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [41] x)
	// result: (LEAL8 x (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [45] x)
	// result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULLconst [73] x)
	// result: (LEAL8 x (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [81] x)
	// result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c * d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64MULQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
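// MULQconst mirrors the MULLconst rules for 64-bit operands. One
// difference: merging nested constants must check is32Bit(c*d), since
// the product still has to fit in the op's 32-bit AuxInt.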
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(int64(c)*int64(d))
	// result: (MULQconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) * int64(d))) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-9] x)
	// result: (NEGQ (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-5] x)
	// result: (NEGQ (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-3] x)
	// result: (NEGQ (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-1] x)
	// result: (NEGQ x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MULQconst [ 1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULQconst [ 3] x)
	// result: (LEAQ2 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [ 5] x)
	// result: (LEAQ4 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [ 7] x)
	// result: (LEAQ2 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [ 9] x)
	// result: (LEAQ8 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [11] x)
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [13] x)
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [19] x)
	// result: (LEAQ2 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [21] x)
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [25] x)
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [27] x)
	// result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [37] x)
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [41] x)
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [45] x)
	// result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [73] x)
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [81] x)
	// result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
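// In the load-folding rules below, canMergeLoadClobber reports (roughly)
// that the load l is used only by this op and can be safely subsumed
// into it, and clobber(l) then marks the load for removal.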
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
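	// The next rule forwards a just-stored integer value to this
	// floating-point load: MOVQi2f moves the 64-bit pattern from a GP
	// register into an XMM register, avoiding the round trip through memory.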
	// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (MULSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (MULSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGL (NEGL x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGL {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGL s:(SUBL x y))
	// cond: s.Uses == 1
	// result: (SUBL y x)
	for {
		s := v_0
		if s.Op != OpAMD64SUBL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGL (MOVLconst [c]))
	// result: (MOVLconst [-c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGQ (NEGQ x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGQ s:(SUBQ x y))
	// cond: s.Uses == 1
	// result: (SUBQ y x)
	for {
		s := v_0
		if s.Op != OpAMD64SUBQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGQ (MOVQconst [c]))
	// result: (MOVQconst [-c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-c)
		return true
	}
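	// -(c + (-x)) folds to x - c, expressed as ADDQconst [-c] x. The guard
	// c != -(1<<31) rejects the one int32 constant whose negation overflows.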
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NOTL (MOVLconst [c]))
	// result: (MOVLconst [^c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(^c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NOTQ (MOVQconst [c]))
	// result: (MOVQconst [^c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(^c)
		return true
	}
	return false
}
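// ORing in a single dynamically positioned bit, (SHLL (MOVLconst [1]) y),
// is recognized as a bit-test-and-set instruction (BTSL here, BTSQ in the
// 64-bit rules further down).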
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTSL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORL x (MOVLconst [c]))
	// result: (ORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORLconst [c] (ORLconst [d] x))
	// result: (ORLconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: c==-1
	// result: (MOVLconst [-1])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
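	// As with the MULSxload rules above, a float value stored and then
	// reloaded as an integer is forwarded in registers: MOVLf2i
	// reinterprets the float32 bits as a 32-bit integer for the ORL.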
14519	// match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
14520	// result: ( ORL x (MOVLf2i y))
14521	for {
14522		off := auxIntToInt32(v.AuxInt)
14523		sym := auxToSym(v.Aux)
14524		x := v_0
14525		ptr := v_1
14526		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14527			break
14528		}
14529		y := v_2.Args[1]
14530		if ptr != v_2.Args[0] {
14531			break
14532		}
14533		v.reset(OpAMD64ORL)
14534		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
14535		v0.AddArg(y)
14536		v.AddArg2(x, v0)
14537		return true
14538	}
14539	return false
14540}
14541func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
14542	v_2 := v.Args[2]
14543	v_1 := v.Args[1]
14544	v_0 := v.Args[0]
14545	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
14546	// cond: is32Bit(int64(off1)+int64(off2))
14547	// result: (ORLmodify [off1+off2] {sym} base val mem)
14548	for {
14549		off1 := auxIntToInt32(v.AuxInt)
14550		sym := auxToSym(v.Aux)
14551		if v_0.Op != OpAMD64ADDQconst {
14552			break
14553		}
14554		off2 := auxIntToInt32(v_0.AuxInt)
14555		base := v_0.Args[0]
14556		val := v_1
14557		mem := v_2
14558		if !(is32Bit(int64(off1) + int64(off2))) {
14559			break
14560		}
14561		v.reset(OpAMD64ORLmodify)
14562		v.AuxInt = int32ToAuxInt(off1 + off2)
14563		v.Aux = symToAux(sym)
14564		v.AddArg3(base, val, mem)
14565		return true
14566	}
14567	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
14568	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14569	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
14570	for {
14571		off1 := auxIntToInt32(v.AuxInt)
14572		sym1 := auxToSym(v.Aux)
14573		if v_0.Op != OpAMD64LEAQ {
14574			break
14575		}
14576		off2 := auxIntToInt32(v_0.AuxInt)
14577		sym2 := auxToSym(v_0.Aux)
14578		base := v_0.Args[0]
14579		val := v_1
14580		mem := v_2
14581		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14582			break
14583		}
14584		v.reset(OpAMD64ORLmodify)
14585		v.AuxInt = int32ToAuxInt(off1 + off2)
14586		v.Aux = symToAux(mergeSym(sym1, sym2))
14587		v.AddArg3(base, val, mem)
14588		return true
14589	}
14590	return false
14591}
14592func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
14593	v_1 := v.Args[1]
14594	v_0 := v.Args[0]
14595	// match: (ORQ (SHLQ (MOVQconst [1]) y) x)
14596	// result: (BTSQ x y)
14597	for {
14598		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14599			if v_0.Op != OpAMD64SHLQ {
14600				continue
14601			}
14602			y := v_0.Args[1]
14603			v_0_0 := v_0.Args[0]
14604			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
14605				continue
14606			}
14607			x := v_1
14608			v.reset(OpAMD64BTSQ)
14609			v.AddArg2(x, y)
14610			return true
14611		}
14612		break
14613	}
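	// A single-bit OR with a large constant becomes a bit-test-and-set.
	// The uint64(c) >= 1<<31 guard leaves smaller powers of two to the
	// ORQconst rule below, which encodes them as a sign-extended 32-bit
	// immediate; only bits 31 and up need the BTSQconst form. Roughly:
	//
	//	x | 1<<40 // -> BTSQconst [40] x (no imm32 encoding for 1<<40)
	//	x | 1<<10 // -> ORQconst [1024] x (next rule)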
14614	// match: (ORQ (MOVQconst [c]) x)
14615	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
14616	// result: (BTSQconst [int8(log64(c))] x)
14617	for {
14618		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14619			if v_0.Op != OpAMD64MOVQconst {
14620				continue
14621			}
14622			c := auxIntToInt64(v_0.AuxInt)
14623			x := v_1
14624			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
14625				continue
14626			}
14627			v.reset(OpAMD64BTSQconst)
14628			v.AuxInt = int8ToAuxInt(int8(log64(c)))
14629			v.AddArg(x)
14630			return true
14631		}
14632		break
14633	}
14634	// match: (ORQ x (MOVQconst [c]))
14635	// cond: is32Bit(c)
14636	// result: (ORQconst [int32(c)] x)
14637	for {
14638		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14639			x := v_0
14640			if v_1.Op != OpAMD64MOVQconst {
14641				continue
14642			}
14643			c := auxIntToInt64(v_1.AuxInt)
14644			if !(is32Bit(c)) {
14645				continue
14646			}
14647			v.reset(OpAMD64ORQconst)
14648			v.AuxInt = int32ToAuxInt(int32(c))
14649			v.AddArg(x)
14650			return true
14651		}
14652		break
14653	}
14654	// match: (ORQ x (MOVLconst [c]))
14655	// result: (ORQconst [c] x)
14656	for {
14657		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14658			x := v_0
14659			if v_1.Op != OpAMD64MOVLconst {
14660				continue
14661			}
14662			c := auxIntToInt32(v_1.AuxInt)
14663			v.reset(OpAMD64ORQconst)
14664			v.AuxInt = int32ToAuxInt(c)
14665			v.AddArg(x)
14666			return true
14667		}
14668		break
14669	}
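	// The next four rules fuse the two-register funnel-shift idiom into a
	// single SHRDQ/SHLDQ (the SHRXQ/SHLXQ pair covers the BMI2 forms).
	// Hardware masks 64-bit shift counts mod 64, which is why (NEGQ bits)
	// stands in for 64-bits in the source pattern, roughly:
	//
	//	lo>>bits | hi<<(64-bits) // -> SHRDQ lo hi bits
	//	lo<<bits | hi>>(64-bits) // -> SHLDQ lo hi bits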
14670	// match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
14671	// result: (SHRDQ lo hi bits)
14672	for {
14673		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14674			if v_0.Op != OpAMD64SHRQ {
14675				continue
14676			}
14677			bits := v_0.Args[1]
14678			lo := v_0.Args[0]
14679			if v_1.Op != OpAMD64SHLQ {
14680				continue
14681			}
14682			_ = v_1.Args[1]
14683			hi := v_1.Args[0]
14684			v_1_1 := v_1.Args[1]
14685			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14686				continue
14687			}
14688			v.reset(OpAMD64SHRDQ)
14689			v.AddArg3(lo, hi, bits)
14690			return true
14691		}
14692		break
14693	}
14694	// match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
14695	// result: (SHLDQ lo hi bits)
14696	for {
14697		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14698			if v_0.Op != OpAMD64SHLQ {
14699				continue
14700			}
14701			bits := v_0.Args[1]
14702			lo := v_0.Args[0]
14703			if v_1.Op != OpAMD64SHRQ {
14704				continue
14705			}
14706			_ = v_1.Args[1]
14707			hi := v_1.Args[0]
14708			v_1_1 := v_1.Args[1]
14709			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14710				continue
14711			}
14712			v.reset(OpAMD64SHLDQ)
14713			v.AddArg3(lo, hi, bits)
14714			return true
14715		}
14716		break
14717	}
14718	// match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
14719	// result: (SHRDQ lo hi bits)
14720	for {
14721		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14722			if v_0.Op != OpAMD64SHRXQ {
14723				continue
14724			}
14725			bits := v_0.Args[1]
14726			lo := v_0.Args[0]
14727			if v_1.Op != OpAMD64SHLXQ {
14728				continue
14729			}
14730			_ = v_1.Args[1]
14731			hi := v_1.Args[0]
14732			v_1_1 := v_1.Args[1]
14733			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14734				continue
14735			}
14736			v.reset(OpAMD64SHRDQ)
14737			v.AddArg3(lo, hi, bits)
14738			return true
14739		}
14740		break
14741	}
14742	// match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
14743	// result: (SHLDQ lo hi bits)
14744	for {
14745		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14746			if v_0.Op != OpAMD64SHLXQ {
14747				continue
14748			}
14749			bits := v_0.Args[1]
14750			lo := v_0.Args[0]
14751			if v_1.Op != OpAMD64SHRXQ {
14752				continue
14753			}
14754			_ = v_1.Args[1]
14755			hi := v_1.Args[0]
14756			v_1_1 := v_1.Args[1]
14757			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14758				continue
14759			}
14760			v.reset(OpAMD64SHLDQ)
14761			v.AddArg3(lo, hi, bits)
14762			return true
14763		}
14764		break
14765	}
14766	// match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
14767	// result: (MOVQconst [c|d])
14768	for {
14769		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14770			if v_0.Op != OpAMD64MOVQconst {
14771				continue
14772			}
14773			c := auxIntToInt64(v_0.AuxInt)
14774			if v_1.Op != OpAMD64MOVQconst {
14775				continue
14776			}
14777			d := auxIntToInt64(v_1.AuxInt)
14778			v.reset(OpAMD64MOVQconst)
14779			v.AuxInt = int64ToAuxInt(c | d)
14780			return true
14781		}
14782		break
14783	}
14784	// match: (ORQ x x)
14785	// result: x
14786	for {
14787		x := v_0
14788		if x != v_1 {
14789			break
14790		}
14791		v.copyOf(x)
14792		return true
14793	}
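	// Finally, fold a load into the OR itself: if one operand is a
	// MOVQload whose only use is this OR (canMergeLoadClobber), rewrite to
	// ORQload so the memory operand is consumed directly by the OR.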
14794	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
14795	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
14796	// result: (ORQload x [off] {sym} ptr mem)
14797	for {
14798		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14799			x := v_0
14800			l := v_1
14801			if l.Op != OpAMD64MOVQload {
14802				continue
14803			}
14804			off := auxIntToInt32(l.AuxInt)
14805			sym := auxToSym(l.Aux)
14806			mem := l.Args[1]
14807			ptr := l.Args[0]
14808			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14809				continue
14810			}
14811			v.reset(OpAMD64ORQload)
14812			v.AuxInt = int32ToAuxInt(off)
14813			v.Aux = symToAux(sym)
14814			v.AddArg3(x, ptr, mem)
14815			return true
14816		}
14817		break
14818	}
14819	return false
14820}
14821func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
14822	v_0 := v.Args[0]
14823	// match: (ORQconst [c] (ORQconst [d] x))
14824	// result: (ORQconst [c | d] x)
14825	for {
14826		c := auxIntToInt32(v.AuxInt)
14827		if v_0.Op != OpAMD64ORQconst {
14828			break
14829		}
14830		d := auxIntToInt32(v_0.AuxInt)
14831		x := v_0.Args[0]
14832		v.reset(OpAMD64ORQconst)
14833		v.AuxInt = int32ToAuxInt(c | d)
14834		v.AddArg(x)
14835		return true
14836	}
14837	// match: (ORQconst [0] x)
14838	// result: x
14839	for {
14840		if auxIntToInt32(v.AuxInt) != 0 {
14841			break
14842		}
14843		x := v_0
14844		v.copyOf(x)
14845		return true
14846	}
14847	// match: (ORQconst [-1] _)
14848	// result: (MOVQconst [-1])
14849	for {
14850		if auxIntToInt32(v.AuxInt) != -1 {
14851			break
14852		}
14853		v.reset(OpAMD64MOVQconst)
14854		v.AuxInt = int64ToAuxInt(-1)
14855		return true
14856	}
14857	// match: (ORQconst [c] (MOVQconst [d]))
14858	// result: (MOVQconst [int64(c)|d])
14859	for {
14860		c := auxIntToInt32(v.AuxInt)
14861		if v_0.Op != OpAMD64MOVQconst {
14862			break
14863		}
14864		d := auxIntToInt64(v_0.AuxInt)
14865		v.reset(OpAMD64MOVQconst)
14866		v.AuxInt = int64ToAuxInt(int64(c) | d)
14867		return true
14868	}
14869	return false
14870}
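// For the *constmodify ops the AuxInt packs both the immediate value and
// the address offset into a single ValAndOff, so folding an ADDQconst or
// LEAQ into the address must go through canAdd32/addOffset32 to keep the
// offset half within its 32-bit range.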
14871func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
14872	v_1 := v.Args[1]
14873	v_0 := v.Args[0]
14874	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
14875	// cond: ValAndOff(valoff1).canAdd32(off2)
14876	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
14877	for {
14878		valoff1 := auxIntToValAndOff(v.AuxInt)
14879		sym := auxToSym(v.Aux)
14880		if v_0.Op != OpAMD64ADDQconst {
14881			break
14882		}
14883		off2 := auxIntToInt32(v_0.AuxInt)
14884		base := v_0.Args[0]
14885		mem := v_1
14886		if !(ValAndOff(valoff1).canAdd32(off2)) {
14887			break
14888		}
14889		v.reset(OpAMD64ORQconstmodify)
14890		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14891		v.Aux = symToAux(sym)
14892		v.AddArg2(base, mem)
14893		return true
14894	}
14895	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
14896	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
14897	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
14898	for {
14899		valoff1 := auxIntToValAndOff(v.AuxInt)
14900		sym1 := auxToSym(v.Aux)
14901		if v_0.Op != OpAMD64LEAQ {
14902			break
14903		}
14904		off2 := auxIntToInt32(v_0.AuxInt)
14905		sym2 := auxToSym(v_0.Aux)
14906		base := v_0.Args[0]
14907		mem := v_1
14908		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14909			break
14910		}
14911		v.reset(OpAMD64ORQconstmodify)
14912		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14913		v.Aux = symToAux(mergeSym(sym1, sym2))
14914		v.AddArg2(base, mem)
14915		return true
14916	}
14917	return false
14918}
14919func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
14920	v_2 := v.Args[2]
14921	v_1 := v.Args[1]
14922	v_0 := v.Args[0]
14923	b := v.Block
14924	typ := &b.Func.Config.Types
14925	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
14926	// cond: is32Bit(int64(off1)+int64(off2))
14927	// result: (ORQload [off1+off2] {sym} val base mem)
14928	for {
14929		off1 := auxIntToInt32(v.AuxInt)
14930		sym := auxToSym(v.Aux)
14931		val := v_0
14932		if v_1.Op != OpAMD64ADDQconst {
14933			break
14934		}
14935		off2 := auxIntToInt32(v_1.AuxInt)
14936		base := v_1.Args[0]
14937		mem := v_2
14938		if !(is32Bit(int64(off1) + int64(off2))) {
14939			break
14940		}
14941		v.reset(OpAMD64ORQload)
14942		v.AuxInt = int32ToAuxInt(off1 + off2)
14943		v.Aux = symToAux(sym)
14944		v.AddArg3(val, base, mem)
14945		return true
14946	}
14947	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
14948	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14949	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
14950	for {
14951		off1 := auxIntToInt32(v.AuxInt)
14952		sym1 := auxToSym(v.Aux)
14953		val := v_0
14954		if v_1.Op != OpAMD64LEAQ {
14955			break
14956		}
14957		off2 := auxIntToInt32(v_1.AuxInt)
14958		sym2 := auxToSym(v_1.Aux)
14959		base := v_1.Args[0]
14960		mem := v_2
14961		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14962			break
14963		}
14964		v.reset(OpAMD64ORQload)
14965		v.AuxInt = int32ToAuxInt(off1 + off2)
14966		v.Aux = symToAux(mergeSym(sym1, sym2))
14967		v.AddArg3(val, base, mem)
14968		return true
14969	}
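	// 64-bit analogue of the ORLload/MOVSSstore rule above: reuse the bits
	// of a just-stored float64 via MOVQf2i rather than reloading them.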
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ORQ x (MOVQf2i y))
14972	for {
14973		off := auxIntToInt32(v.AuxInt)
14974		sym := auxToSym(v.Aux)
14975		x := v_0
14976		ptr := v_1
14977		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14978			break
14979		}
14980		y := v_2.Args[1]
14981		if ptr != v_2.Args[0] {
14982			break
14983		}
14984		v.reset(OpAMD64ORQ)
14985		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
14986		v0.AddArg(y)
14987		v.AddArg2(x, v0)
14988		return true
14989	}
14990	return false
14991}
14992func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
14993	v_2 := v.Args[2]
14994	v_1 := v.Args[1]
14995	v_0 := v.Args[0]
14996	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
14997	// cond: is32Bit(int64(off1)+int64(off2))
14998	// result: (ORQmodify [off1+off2] {sym} base val mem)
14999	for {
15000		off1 := auxIntToInt32(v.AuxInt)
15001		sym := auxToSym(v.Aux)
15002		if v_0.Op != OpAMD64ADDQconst {
15003			break
15004		}
15005		off2 := auxIntToInt32(v_0.AuxInt)
15006		base := v_0.Args[0]
15007		val := v_1
15008		mem := v_2
15009		if !(is32Bit(int64(off1) + int64(off2))) {
15010			break
15011		}
15012		v.reset(OpAMD64ORQmodify)
15013		v.AuxInt = int32ToAuxInt(off1 + off2)
15014		v.Aux = symToAux(sym)
15015		v.AddArg3(base, val, mem)
15016		return true
15017	}
15018	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
15019	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
15020	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
15021	for {
15022		off1 := auxIntToInt32(v.AuxInt)
15023		sym1 := auxToSym(v.Aux)
15024		if v_0.Op != OpAMD64LEAQ {
15025			break
15026		}
15027		off2 := auxIntToInt32(v_0.AuxInt)
15028		sym2 := auxToSym(v_0.Aux)
15029		base := v_0.Args[0]
15030		val := v_1
15031		mem := v_2
15032		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15033			break
15034		}
15035		v.reset(OpAMD64ORQmodify)
15036		v.AuxInt = int32ToAuxInt(off1 + off2)
15037		v.Aux = symToAux(mergeSym(sym1, sym2))
15038		v.AddArg3(base, val, mem)
15039		return true
15040	}
15041	return false
15042}
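// Rotate rules. A rotate left by a negated count is a rotate right by
// that count (and vice versa) because rotate counts are taken modulo the
// operand width, so (ROLx x (NEGQ/NEGL y)) canonicalizes to (RORx x y).
// Constant counts are masked to the width and folded into ROLxconst. The
// same identity in portable Go, which math/bits documents explicitly:
//
//	bits.RotateLeft8(x, -k) // rotates x right by k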
15043func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
15044	v_1 := v.Args[1]
15045	v_0 := v.Args[0]
15046	// match: (ROLB x (NEGQ y))
15047	// result: (RORB x y)
15048	for {
15049		x := v_0
15050		if v_1.Op != OpAMD64NEGQ {
15051			break
15052		}
15053		y := v_1.Args[0]
15054		v.reset(OpAMD64RORB)
15055		v.AddArg2(x, y)
15056		return true
15057	}
15058	// match: (ROLB x (NEGL y))
15059	// result: (RORB x y)
15060	for {
15061		x := v_0
15062		if v_1.Op != OpAMD64NEGL {
15063			break
15064		}
15065		y := v_1.Args[0]
15066		v.reset(OpAMD64RORB)
15067		v.AddArg2(x, y)
15068		return true
15069	}
15070	// match: (ROLB x (MOVQconst [c]))
	// result: (ROLBconst [int8(c&7)] x)
15072	for {
15073		x := v_0
15074		if v_1.Op != OpAMD64MOVQconst {
15075			break
15076		}
15077		c := auxIntToInt64(v_1.AuxInt)
15078		v.reset(OpAMD64ROLBconst)
15079		v.AuxInt = int8ToAuxInt(int8(c & 7))
15080		v.AddArg(x)
15081		return true
15082	}
15083	// match: (ROLB x (MOVLconst [c]))
	// result: (ROLBconst [int8(c&7)] x)
15085	for {
15086		x := v_0
15087		if v_1.Op != OpAMD64MOVLconst {
15088			break
15089		}
15090		c := auxIntToInt32(v_1.AuxInt)
15091		v.reset(OpAMD64ROLBconst)
15092		v.AuxInt = int8ToAuxInt(int8(c & 7))
15093		v.AddArg(x)
15094		return true
15095	}
15096	return false
15097}
15098func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
15099	v_0 := v.Args[0]
15100	// match: (ROLBconst x [0])
15101	// result: x
15102	for {
15103		if auxIntToInt8(v.AuxInt) != 0 {
15104			break
15105		}
15106		x := v_0
15107		v.copyOf(x)
15108		return true
15109	}
15110	return false
15111}
15112func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
15113	v_1 := v.Args[1]
15114	v_0 := v.Args[0]
15115	// match: (ROLL x (NEGQ y))
15116	// result: (RORL x y)
15117	for {
15118		x := v_0
15119		if v_1.Op != OpAMD64NEGQ {
15120			break
15121		}
15122		y := v_1.Args[0]
15123		v.reset(OpAMD64RORL)
15124		v.AddArg2(x, y)
15125		return true
15126	}
15127	// match: (ROLL x (NEGL y))
15128	// result: (RORL x y)
15129	for {
15130		x := v_0
15131		if v_1.Op != OpAMD64NEGL {
15132			break
15133		}
15134		y := v_1.Args[0]
15135		v.reset(OpAMD64RORL)
15136		v.AddArg2(x, y)
15137		return true
15138	}
15139	// match: (ROLL x (MOVQconst [c]))
15140	// result: (ROLLconst [int8(c&31)] x)
15141	for {
15142		x := v_0
15143		if v_1.Op != OpAMD64MOVQconst {
15144			break
15145		}
15146		c := auxIntToInt64(v_1.AuxInt)
15147		v.reset(OpAMD64ROLLconst)
15148		v.AuxInt = int8ToAuxInt(int8(c & 31))
15149		v.AddArg(x)
15150		return true
15151	}
15152	// match: (ROLL x (MOVLconst [c]))
15153	// result: (ROLLconst [int8(c&31)] x)
15154	for {
15155		x := v_0
15156		if v_1.Op != OpAMD64MOVLconst {
15157			break
15158		}
15159		c := auxIntToInt32(v_1.AuxInt)
15160		v.reset(OpAMD64ROLLconst)
15161		v.AuxInt = int8ToAuxInt(int8(c & 31))
15162		v.AddArg(x)
15163		return true
15164	}
15165	return false
15166}
15167func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
15168	v_0 := v.Args[0]
15169	// match: (ROLLconst x [0])
15170	// result: x
15171	for {
15172		if auxIntToInt8(v.AuxInt) != 0 {
15173			break
15174		}
15175		x := v_0
15176		v.copyOf(x)
15177		return true
15178	}
15179	return false
15180}
15181func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
15182	v_1 := v.Args[1]
15183	v_0 := v.Args[0]
15184	// match: (ROLQ x (NEGQ y))
15185	// result: (RORQ x y)
15186	for {
15187		x := v_0
15188		if v_1.Op != OpAMD64NEGQ {
15189			break
15190		}
15191		y := v_1.Args[0]
15192		v.reset(OpAMD64RORQ)
15193		v.AddArg2(x, y)
15194		return true
15195	}
15196	// match: (ROLQ x (NEGL y))
15197	// result: (RORQ x y)
15198	for {
15199		x := v_0
15200		if v_1.Op != OpAMD64NEGL {
15201			break
15202		}
15203		y := v_1.Args[0]
15204		v.reset(OpAMD64RORQ)
15205		v.AddArg2(x, y)
15206		return true
15207	}
15208	// match: (ROLQ x (MOVQconst [c]))
15209	// result: (ROLQconst [int8(c&63)] x)
15210	for {
15211		x := v_0
15212		if v_1.Op != OpAMD64MOVQconst {
15213			break
15214		}
15215		c := auxIntToInt64(v_1.AuxInt)
15216		v.reset(OpAMD64ROLQconst)
15217		v.AuxInt = int8ToAuxInt(int8(c & 63))
15218		v.AddArg(x)
15219		return true
15220	}
15221	// match: (ROLQ x (MOVLconst [c]))
15222	// result: (ROLQconst [int8(c&63)] x)
15223	for {
15224		x := v_0
15225		if v_1.Op != OpAMD64MOVLconst {
15226			break
15227		}
15228		c := auxIntToInt32(v_1.AuxInt)
15229		v.reset(OpAMD64ROLQconst)
15230		v.AuxInt = int8ToAuxInt(int8(c & 63))
15231		v.AddArg(x)
15232		return true
15233	}
15234	return false
15235}
15236func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
15237	v_0 := v.Args[0]
15238	// match: (ROLQconst x [0])
15239	// result: x
15240	for {
15241		if auxIntToInt8(v.AuxInt) != 0 {
15242			break
15243		}
15244		x := v_0
15245		v.copyOf(x)
15246		return true
15247	}
15248	return false
15249}
15250func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
15251	v_1 := v.Args[1]
15252	v_0 := v.Args[0]
15253	// match: (ROLW x (NEGQ y))
15254	// result: (RORW x y)
15255	for {
15256		x := v_0
15257		if v_1.Op != OpAMD64NEGQ {
15258			break
15259		}
15260		y := v_1.Args[0]
15261		v.reset(OpAMD64RORW)
15262		v.AddArg2(x, y)
15263		return true
15264	}
15265	// match: (ROLW x (NEGL y))
15266	// result: (RORW x y)
15267	for {
15268		x := v_0
15269		if v_1.Op != OpAMD64NEGL {
15270			break
15271		}
15272		y := v_1.Args[0]
15273		v.reset(OpAMD64RORW)
15274		v.AddArg2(x, y)
15275		return true
15276	}
15277	// match: (ROLW x (MOVQconst [c]))
15278	// result: (ROLWconst [int8(c&15)] x)
15279	for {
15280		x := v_0
15281		if v_1.Op != OpAMD64MOVQconst {
15282			break
15283		}
15284		c := auxIntToInt64(v_1.AuxInt)
15285		v.reset(OpAMD64ROLWconst)
15286		v.AuxInt = int8ToAuxInt(int8(c & 15))
15287		v.AddArg(x)
15288		return true
15289	}
15290	// match: (ROLW x (MOVLconst [c]))
15291	// result: (ROLWconst [int8(c&15)] x)
15292	for {
15293		x := v_0
15294		if v_1.Op != OpAMD64MOVLconst {
15295			break
15296		}
15297		c := auxIntToInt32(v_1.AuxInt)
15298		v.reset(OpAMD64ROLWconst)
15299		v.AuxInt = int8ToAuxInt(int8(c & 15))
15300		v.AddArg(x)
15301		return true
15302	}
15303	return false
15304}
15305func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
15306	v_0 := v.Args[0]
15307	// match: (ROLWconst x [0])
15308	// result: x
15309	for {
15310		if auxIntToInt8(v.AuxInt) != 0 {
15311			break
15312		}
15313		x := v_0
15314		v.copyOf(x)
15315		return true
15316	}
15317	return false
15318}
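// The ROR* ops have no *const form here: a RORx by a constant c is
// rewritten as ROLxconst with count (-c) masked to the width, so only the
// rotate-left constant opcodes need to exist.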
15319func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
15320	v_1 := v.Args[1]
15321	v_0 := v.Args[0]
15322	// match: (RORB x (NEGQ y))
15323	// result: (ROLB x y)
15324	for {
15325		x := v_0
15326		if v_1.Op != OpAMD64NEGQ {
15327			break
15328		}
15329		y := v_1.Args[0]
15330		v.reset(OpAMD64ROLB)
15331		v.AddArg2(x, y)
15332		return true
15333	}
15334	// match: (RORB x (NEGL y))
15335	// result: (ROLB x y)
15336	for {
15337		x := v_0
15338		if v_1.Op != OpAMD64NEGL {
15339			break
15340		}
15341		y := v_1.Args[0]
15342		v.reset(OpAMD64ROLB)
15343		v.AddArg2(x, y)
15344		return true
15345	}
15346	// match: (RORB x (MOVQconst [c]))
	// result: (ROLBconst [int8((-c)&7)] x)
15348	for {
15349		x := v_0
15350		if v_1.Op != OpAMD64MOVQconst {
15351			break
15352		}
15353		c := auxIntToInt64(v_1.AuxInt)
15354		v.reset(OpAMD64ROLBconst)
15355		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15356		v.AddArg(x)
15357		return true
15358	}
15359	// match: (RORB x (MOVLconst [c]))
	// result: (ROLBconst [int8((-c)&7)] x)
15361	for {
15362		x := v_0
15363		if v_1.Op != OpAMD64MOVLconst {
15364			break
15365		}
15366		c := auxIntToInt32(v_1.AuxInt)
15367		v.reset(OpAMD64ROLBconst)
15368		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15369		v.AddArg(x)
15370		return true
15371	}
15372	return false
15373}
15374func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
15375	v_1 := v.Args[1]
15376	v_0 := v.Args[0]
15377	// match: (RORL x (NEGQ y))
15378	// result: (ROLL x y)
15379	for {
15380		x := v_0
15381		if v_1.Op != OpAMD64NEGQ {
15382			break
15383		}
15384		y := v_1.Args[0]
15385		v.reset(OpAMD64ROLL)
15386		v.AddArg2(x, y)
15387		return true
15388	}
15389	// match: (RORL x (NEGL y))
15390	// result: (ROLL x y)
15391	for {
15392		x := v_0
15393		if v_1.Op != OpAMD64NEGL {
15394			break
15395		}
15396		y := v_1.Args[0]
15397		v.reset(OpAMD64ROLL)
15398		v.AddArg2(x, y)
15399		return true
15400	}
15401	// match: (RORL x (MOVQconst [c]))
15402	// result: (ROLLconst [int8((-c)&31)] x)
15403	for {
15404		x := v_0
15405		if v_1.Op != OpAMD64MOVQconst {
15406			break
15407		}
15408		c := auxIntToInt64(v_1.AuxInt)
15409		v.reset(OpAMD64ROLLconst)
15410		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15411		v.AddArg(x)
15412		return true
15413	}
15414	// match: (RORL x (MOVLconst [c]))
15415	// result: (ROLLconst [int8((-c)&31)] x)
15416	for {
15417		x := v_0
15418		if v_1.Op != OpAMD64MOVLconst {
15419			break
15420		}
15421		c := auxIntToInt32(v_1.AuxInt)
15422		v.reset(OpAMD64ROLLconst)
15423		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15424		v.AddArg(x)
15425		return true
15426	}
15427	return false
15428}
15429func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
15430	v_1 := v.Args[1]
15431	v_0 := v.Args[0]
15432	// match: (RORQ x (NEGQ y))
15433	// result: (ROLQ x y)
15434	for {
15435		x := v_0
15436		if v_1.Op != OpAMD64NEGQ {
15437			break
15438		}
15439		y := v_1.Args[0]
15440		v.reset(OpAMD64ROLQ)
15441		v.AddArg2(x, y)
15442		return true
15443	}
15444	// match: (RORQ x (NEGL y))
15445	// result: (ROLQ x y)
15446	for {
15447		x := v_0
15448		if v_1.Op != OpAMD64NEGL {
15449			break
15450		}
15451		y := v_1.Args[0]
15452		v.reset(OpAMD64ROLQ)
15453		v.AddArg2(x, y)
15454		return true
15455	}
15456	// match: (RORQ x (MOVQconst [c]))
15457	// result: (ROLQconst [int8((-c)&63)] x)
15458	for {
15459		x := v_0
15460		if v_1.Op != OpAMD64MOVQconst {
15461			break
15462		}
15463		c := auxIntToInt64(v_1.AuxInt)
15464		v.reset(OpAMD64ROLQconst)
15465		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15466		v.AddArg(x)
15467		return true
15468	}
15469	// match: (RORQ x (MOVLconst [c]))
15470	// result: (ROLQconst [int8((-c)&63)] x)
15471	for {
15472		x := v_0
15473		if v_1.Op != OpAMD64MOVLconst {
15474			break
15475		}
15476		c := auxIntToInt32(v_1.AuxInt)
15477		v.reset(OpAMD64ROLQconst)
15478		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15479		v.AddArg(x)
15480		return true
15481	}
15482	return false
15483}
15484func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
15485	v_1 := v.Args[1]
15486	v_0 := v.Args[0]
15487	// match: (RORW x (NEGQ y))
15488	// result: (ROLW x y)
15489	for {
15490		x := v_0
15491		if v_1.Op != OpAMD64NEGQ {
15492			break
15493		}
15494		y := v_1.Args[0]
15495		v.reset(OpAMD64ROLW)
15496		v.AddArg2(x, y)
15497		return true
15498	}
15499	// match: (RORW x (NEGL y))
15500	// result: (ROLW x y)
15501	for {
15502		x := v_0
15503		if v_1.Op != OpAMD64NEGL {
15504			break
15505		}
15506		y := v_1.Args[0]
15507		v.reset(OpAMD64ROLW)
15508		v.AddArg2(x, y)
15509		return true
15510	}
15511	// match: (RORW x (MOVQconst [c]))
15512	// result: (ROLWconst [int8((-c)&15)] x)
15513	for {
15514		x := v_0
15515		if v_1.Op != OpAMD64MOVQconst {
15516			break
15517		}
15518		c := auxIntToInt64(v_1.AuxInt)
15519		v.reset(OpAMD64ROLWconst)
15520		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15521		v.AddArg(x)
15522		return true
15523	}
15524	// match: (RORW x (MOVLconst [c]))
15525	// result: (ROLWconst [int8((-c)&15)] x)
15526	for {
15527		x := v_0
15528		if v_1.Op != OpAMD64MOVLconst {
15529			break
15530		}
15531		c := auxIntToInt32(v_1.AuxInt)
15532		v.reset(OpAMD64ROLWconst)
15533		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15534		v.AddArg(x)
15535		return true
15536	}
15537	return false
15538}
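// SARB's constant count is clamped with min(int64(c)&31, 7): the count is
// first masked to the low 5 bits the way the hardware masks 32-bit
// shifts, then capped at 7 because arithmetically shifting an 8-bit value
// by 7 already yields pure sign bits, so any larger count gives the same
// result:
//
//	int8(-1) >> 7 == -1 // further shifting cannot change it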
15539func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
15540	v_1 := v.Args[1]
15541	v_0 := v.Args[0]
15542	// match: (SARB x (MOVQconst [c]))
15543	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
15544	for {
15545		x := v_0
15546		if v_1.Op != OpAMD64MOVQconst {
15547			break
15548		}
15549		c := auxIntToInt64(v_1.AuxInt)
15550		v.reset(OpAMD64SARBconst)
15551		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15552		v.AddArg(x)
15553		return true
15554	}
15555	// match: (SARB x (MOVLconst [c]))
15556	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
15557	for {
15558		x := v_0
15559		if v_1.Op != OpAMD64MOVLconst {
15560			break
15561		}
15562		c := auxIntToInt32(v_1.AuxInt)
15563		v.reset(OpAMD64SARBconst)
15564		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15565		v.AddArg(x)
15566		return true
15567	}
15568	return false
15569}
15570func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
15571	v_0 := v.Args[0]
15572	// match: (SARBconst x [0])
15573	// result: x
15574	for {
15575		if auxIntToInt8(v.AuxInt) != 0 {
15576			break
15577		}
15578		x := v_0
15579		v.copyOf(x)
15580		return true
15581	}
15582	// match: (SARBconst [c] (MOVQconst [d]))
15583	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
15584	for {
15585		c := auxIntToInt8(v.AuxInt)
15586		if v_0.Op != OpAMD64MOVQconst {
15587			break
15588		}
15589		d := auxIntToInt64(v_0.AuxInt)
15590		v.reset(OpAMD64MOVQconst)
15591		v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
15592		return true
15593	}
15594	return false
15595}
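// 32-bit shift counts are masked to their low 5 bits by the hardware, so
// arithmetic on the count that cannot change those bits is dropped:
// adding a multiple of 32 (c&31 == 0) is a no-op, and ANDing with a mask
// that keeps the low 5 bits intact (c&31 == 31) is too. The NEGQ/NEGL
// variants apply the same reasoning to a negated count. With GOAMD64 >= 3
// the shift can also absorb its operand's load as SARXLload. Sketch of
// the no-op, in machine terms:
//
//	SARL x, (y+32) behaves like SARL x, y // only count&31 is used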
15596func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
15597	v_1 := v.Args[1]
15598	v_0 := v.Args[0]
15599	b := v.Block
15600	// match: (SARL x (MOVQconst [c]))
15601	// result: (SARLconst [int8(c&31)] x)
15602	for {
15603		x := v_0
15604		if v_1.Op != OpAMD64MOVQconst {
15605			break
15606		}
15607		c := auxIntToInt64(v_1.AuxInt)
15608		v.reset(OpAMD64SARLconst)
15609		v.AuxInt = int8ToAuxInt(int8(c & 31))
15610		v.AddArg(x)
15611		return true
15612	}
15613	// match: (SARL x (MOVLconst [c]))
15614	// result: (SARLconst [int8(c&31)] x)
15615	for {
15616		x := v_0
15617		if v_1.Op != OpAMD64MOVLconst {
15618			break
15619		}
15620		c := auxIntToInt32(v_1.AuxInt)
15621		v.reset(OpAMD64SARLconst)
15622		v.AuxInt = int8ToAuxInt(int8(c & 31))
15623		v.AddArg(x)
15624		return true
15625	}
15626	// match: (SARL x (ADDQconst [c] y))
15627	// cond: c & 31 == 0
15628	// result: (SARL x y)
15629	for {
15630		x := v_0
15631		if v_1.Op != OpAMD64ADDQconst {
15632			break
15633		}
15634		c := auxIntToInt32(v_1.AuxInt)
15635		y := v_1.Args[0]
15636		if !(c&31 == 0) {
15637			break
15638		}
15639		v.reset(OpAMD64SARL)
15640		v.AddArg2(x, y)
15641		return true
15642	}
15643	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
15644	// cond: c & 31 == 0
15645	// result: (SARL x (NEGQ <t> y))
15646	for {
15647		x := v_0
15648		if v_1.Op != OpAMD64NEGQ {
15649			break
15650		}
15651		t := v_1.Type
15652		v_1_0 := v_1.Args[0]
15653		if v_1_0.Op != OpAMD64ADDQconst {
15654			break
15655		}
15656		c := auxIntToInt32(v_1_0.AuxInt)
15657		y := v_1_0.Args[0]
15658		if !(c&31 == 0) {
15659			break
15660		}
15661		v.reset(OpAMD64SARL)
15662		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15663		v0.AddArg(y)
15664		v.AddArg2(x, v0)
15665		return true
15666	}
15667	// match: (SARL x (ANDQconst [c] y))
15668	// cond: c & 31 == 31
15669	// result: (SARL x y)
15670	for {
15671		x := v_0
15672		if v_1.Op != OpAMD64ANDQconst {
15673			break
15674		}
15675		c := auxIntToInt32(v_1.AuxInt)
15676		y := v_1.Args[0]
15677		if !(c&31 == 31) {
15678			break
15679		}
15680		v.reset(OpAMD64SARL)
15681		v.AddArg2(x, y)
15682		return true
15683	}
15684	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
15685	// cond: c & 31 == 31
15686	// result: (SARL x (NEGQ <t> y))
15687	for {
15688		x := v_0
15689		if v_1.Op != OpAMD64NEGQ {
15690			break
15691		}
15692		t := v_1.Type
15693		v_1_0 := v_1.Args[0]
15694		if v_1_0.Op != OpAMD64ANDQconst {
15695			break
15696		}
15697		c := auxIntToInt32(v_1_0.AuxInt)
15698		y := v_1_0.Args[0]
15699		if !(c&31 == 31) {
15700			break
15701		}
15702		v.reset(OpAMD64SARL)
15703		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15704		v0.AddArg(y)
15705		v.AddArg2(x, v0)
15706		return true
15707	}
15708	// match: (SARL x (ADDLconst [c] y))
15709	// cond: c & 31 == 0
15710	// result: (SARL x y)
15711	for {
15712		x := v_0
15713		if v_1.Op != OpAMD64ADDLconst {
15714			break
15715		}
15716		c := auxIntToInt32(v_1.AuxInt)
15717		y := v_1.Args[0]
15718		if !(c&31 == 0) {
15719			break
15720		}
15721		v.reset(OpAMD64SARL)
15722		v.AddArg2(x, y)
15723		return true
15724	}
15725	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
15726	// cond: c & 31 == 0
15727	// result: (SARL x (NEGL <t> y))
15728	for {
15729		x := v_0
15730		if v_1.Op != OpAMD64NEGL {
15731			break
15732		}
15733		t := v_1.Type
15734		v_1_0 := v_1.Args[0]
15735		if v_1_0.Op != OpAMD64ADDLconst {
15736			break
15737		}
15738		c := auxIntToInt32(v_1_0.AuxInt)
15739		y := v_1_0.Args[0]
15740		if !(c&31 == 0) {
15741			break
15742		}
15743		v.reset(OpAMD64SARL)
15744		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15745		v0.AddArg(y)
15746		v.AddArg2(x, v0)
15747		return true
15748	}
15749	// match: (SARL x (ANDLconst [c] y))
15750	// cond: c & 31 == 31
15751	// result: (SARL x y)
15752	for {
15753		x := v_0
15754		if v_1.Op != OpAMD64ANDLconst {
15755			break
15756		}
15757		c := auxIntToInt32(v_1.AuxInt)
15758		y := v_1.Args[0]
15759		if !(c&31 == 31) {
15760			break
15761		}
15762		v.reset(OpAMD64SARL)
15763		v.AddArg2(x, y)
15764		return true
15765	}
15766	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
15767	// cond: c & 31 == 31
15768	// result: (SARL x (NEGL <t> y))
15769	for {
15770		x := v_0
15771		if v_1.Op != OpAMD64NEGL {
15772			break
15773		}
15774		t := v_1.Type
15775		v_1_0 := v_1.Args[0]
15776		if v_1_0.Op != OpAMD64ANDLconst {
15777			break
15778		}
15779		c := auxIntToInt32(v_1_0.AuxInt)
15780		y := v_1_0.Args[0]
15781		if !(c&31 == 31) {
15782			break
15783		}
15784		v.reset(OpAMD64SARL)
15785		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15786		v0.AddArg(y)
15787		v.AddArg2(x, v0)
15788		return true
15789	}
15790	// match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
15791	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
15792	// result: (SARXLload [off] {sym} ptr x mem)
15793	for {
15794		l := v_0
15795		if l.Op != OpAMD64MOVLload {
15796			break
15797		}
15798		off := auxIntToInt32(l.AuxInt)
15799		sym := auxToSym(l.Aux)
15800		mem := l.Args[1]
15801		ptr := l.Args[0]
15802		x := v_1
15803		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
15804			break
15805		}
15806		v.reset(OpAMD64SARXLload)
15807		v.AuxInt = int32ToAuxInt(off)
15808		v.Aux = symToAux(sym)
15809		v.AddArg3(ptr, x, mem)
15810		return true
15811	}
15812	return false
15813}
15814func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
15815	v_0 := v.Args[0]
15816	// match: (SARLconst x [0])
15817	// result: x
15818	for {
15819		if auxIntToInt8(v.AuxInt) != 0 {
15820			break
15821		}
15822		x := v_0
15823		v.copyOf(x)
15824		return true
15825	}
15826	// match: (SARLconst [c] (MOVQconst [d]))
15827	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
15828	for {
15829		c := auxIntToInt8(v.AuxInt)
15830		if v_0.Op != OpAMD64MOVQconst {
15831			break
15832		}
15833		d := auxIntToInt64(v_0.AuxInt)
15834		v.reset(OpAMD64MOVQconst)
15835		v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
15836		return true
15837	}
15838	return false
15839}
15840func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
15841	v_1 := v.Args[1]
15842	v_0 := v.Args[0]
15843	b := v.Block
15844	// match: (SARQ x (MOVQconst [c]))
15845	// result: (SARQconst [int8(c&63)] x)
15846	for {
15847		x := v_0
15848		if v_1.Op != OpAMD64MOVQconst {
15849			break
15850		}
15851		c := auxIntToInt64(v_1.AuxInt)
15852		v.reset(OpAMD64SARQconst)
15853		v.AuxInt = int8ToAuxInt(int8(c & 63))
15854		v.AddArg(x)
15855		return true
15856	}
15857	// match: (SARQ x (MOVLconst [c]))
15858	// result: (SARQconst [int8(c&63)] x)
15859	for {
15860		x := v_0
15861		if v_1.Op != OpAMD64MOVLconst {
15862			break
15863		}
15864		c := auxIntToInt32(v_1.AuxInt)
15865		v.reset(OpAMD64SARQconst)
15866		v.AuxInt = int8ToAuxInt(int8(c & 63))
15867		v.AddArg(x)
15868		return true
15869	}
15870	// match: (SARQ x (ADDQconst [c] y))
15871	// cond: c & 63 == 0
15872	// result: (SARQ x y)
15873	for {
15874		x := v_0
15875		if v_1.Op != OpAMD64ADDQconst {
15876			break
15877		}
15878		c := auxIntToInt32(v_1.AuxInt)
15879		y := v_1.Args[0]
15880		if !(c&63 == 0) {
15881			break
15882		}
15883		v.reset(OpAMD64SARQ)
15884		v.AddArg2(x, y)
15885		return true
15886	}
15887	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
15888	// cond: c & 63 == 0
15889	// result: (SARQ x (NEGQ <t> y))
15890	for {
15891		x := v_0
15892		if v_1.Op != OpAMD64NEGQ {
15893			break
15894		}
15895		t := v_1.Type
15896		v_1_0 := v_1.Args[0]
15897		if v_1_0.Op != OpAMD64ADDQconst {
15898			break
15899		}
15900		c := auxIntToInt32(v_1_0.AuxInt)
15901		y := v_1_0.Args[0]
15902		if !(c&63 == 0) {
15903			break
15904		}
15905		v.reset(OpAMD64SARQ)
15906		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15907		v0.AddArg(y)
15908		v.AddArg2(x, v0)
15909		return true
15910	}
15911	// match: (SARQ x (ANDQconst [c] y))
15912	// cond: c & 63 == 63
15913	// result: (SARQ x y)
15914	for {
15915		x := v_0
15916		if v_1.Op != OpAMD64ANDQconst {
15917			break
15918		}
15919		c := auxIntToInt32(v_1.AuxInt)
15920		y := v_1.Args[0]
15921		if !(c&63 == 63) {
15922			break
15923		}
15924		v.reset(OpAMD64SARQ)
15925		v.AddArg2(x, y)
15926		return true
15927	}
15928	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
15929	// cond: c & 63 == 63
15930	// result: (SARQ x (NEGQ <t> y))
15931	for {
15932		x := v_0
15933		if v_1.Op != OpAMD64NEGQ {
15934			break
15935		}
15936		t := v_1.Type
15937		v_1_0 := v_1.Args[0]
15938		if v_1_0.Op != OpAMD64ANDQconst {
15939			break
15940		}
15941		c := auxIntToInt32(v_1_0.AuxInt)
15942		y := v_1_0.Args[0]
15943		if !(c&63 == 63) {
15944			break
15945		}
15946		v.reset(OpAMD64SARQ)
15947		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15948		v0.AddArg(y)
15949		v.AddArg2(x, v0)
15950		return true
15951	}
15952	// match: (SARQ x (ADDLconst [c] y))
15953	// cond: c & 63 == 0
15954	// result: (SARQ x y)
15955	for {
15956		x := v_0
15957		if v_1.Op != OpAMD64ADDLconst {
15958			break
15959		}
15960		c := auxIntToInt32(v_1.AuxInt)
15961		y := v_1.Args[0]
15962		if !(c&63 == 0) {
15963			break
15964		}
15965		v.reset(OpAMD64SARQ)
15966		v.AddArg2(x, y)
15967		return true
15968	}
15969	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
15970	// cond: c & 63 == 0
15971	// result: (SARQ x (NEGL <t> y))
15972	for {
15973		x := v_0
15974		if v_1.Op != OpAMD64NEGL {
15975			break
15976		}
15977		t := v_1.Type
15978		v_1_0 := v_1.Args[0]
15979		if v_1_0.Op != OpAMD64ADDLconst {
15980			break
15981		}
15982		c := auxIntToInt32(v_1_0.AuxInt)
15983		y := v_1_0.Args[0]
15984		if !(c&63 == 0) {
15985			break
15986		}
15987		v.reset(OpAMD64SARQ)
15988		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15989		v0.AddArg(y)
15990		v.AddArg2(x, v0)
15991		return true
15992	}
15993	// match: (SARQ x (ANDLconst [c] y))
15994	// cond: c & 63 == 63
15995	// result: (SARQ x y)
15996	for {
15997		x := v_0
15998		if v_1.Op != OpAMD64ANDLconst {
15999			break
16000		}
16001		c := auxIntToInt32(v_1.AuxInt)
16002		y := v_1.Args[0]
16003		if !(c&63 == 63) {
16004			break
16005		}
16006		v.reset(OpAMD64SARQ)
16007		v.AddArg2(x, y)
16008		return true
16009	}
16010	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
16011	// cond: c & 63 == 63
16012	// result: (SARQ x (NEGL <t> y))
16013	for {
16014		x := v_0
16015		if v_1.Op != OpAMD64NEGL {
16016			break
16017		}
16018		t := v_1.Type
16019		v_1_0 := v_1.Args[0]
16020		if v_1_0.Op != OpAMD64ANDLconst {
16021			break
16022		}
16023		c := auxIntToInt32(v_1_0.AuxInt)
16024		y := v_1_0.Args[0]
16025		if !(c&63 == 63) {
16026			break
16027		}
16028		v.reset(OpAMD64SARQ)
16029		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
16030		v0.AddArg(y)
16031		v.AddArg2(x, v0)
16032		return true
16033	}
16034	// match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
16035	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
16036	// result: (SARXQload [off] {sym} ptr x mem)
16037	for {
16038		l := v_0
16039		if l.Op != OpAMD64MOVQload {
16040			break
16041		}
16042		off := auxIntToInt32(l.AuxInt)
16043		sym := auxToSym(l.Aux)
16044		mem := l.Args[1]
16045		ptr := l.Args[0]
16046		x := v_1
16047		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
16048			break
16049		}
16050		v.reset(OpAMD64SARXQload)
16051		v.AuxInt = int32ToAuxInt(off)
16052		v.Aux = symToAux(sym)
16053		v.AddArg3(ptr, x, mem)
16054		return true
16055	}
16056	return false
16057}
16058func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
16059	v_0 := v.Args[0]
16060	// match: (SARQconst x [0])
16061	// result: x
16062	for {
16063		if auxIntToInt8(v.AuxInt) != 0 {
16064			break
16065		}
16066		x := v_0
16067		v.copyOf(x)
16068		return true
16069	}
16070	// match: (SARQconst [c] (MOVQconst [d]))
16071	// result: (MOVQconst [d>>uint64(c)])
16072	for {
16073		c := auxIntToInt8(v.AuxInt)
16074		if v_0.Op != OpAMD64MOVQconst {
16075			break
16076		}
16077		d := auxIntToInt64(v_0.AuxInt)
16078		v.reset(OpAMD64MOVQconst)
16079		v.AuxInt = int64ToAuxInt(d >> uint64(c))
16080		return true
16081	}
16082	return false
16083}
16084func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
16085	v_1 := v.Args[1]
16086	v_0 := v.Args[0]
16087	// match: (SARW x (MOVQconst [c]))
16088	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
16089	for {
16090		x := v_0
16091		if v_1.Op != OpAMD64MOVQconst {
16092			break
16093		}
16094		c := auxIntToInt64(v_1.AuxInt)
16095		v.reset(OpAMD64SARWconst)
16096		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
16097		v.AddArg(x)
16098		return true
16099	}
16100	// match: (SARW x (MOVLconst [c]))
16101	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
16102	for {
16103		x := v_0
16104		if v_1.Op != OpAMD64MOVLconst {
16105			break
16106		}
16107		c := auxIntToInt32(v_1.AuxInt)
16108		v.reset(OpAMD64SARWconst)
16109		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
16110		v.AddArg(x)
16111		return true
16112	}
16113	return false
16114}
16115func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
16116	v_0 := v.Args[0]
16117	// match: (SARWconst x [0])
16118	// result: x
16119	for {
16120		if auxIntToInt8(v.AuxInt) != 0 {
16121			break
16122		}
16123		x := v_0
16124		v.copyOf(x)
16125		return true
16126	}
16127	// match: (SARWconst [c] (MOVQconst [d]))
16128	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
16129	for {
16130		c := auxIntToInt8(v.AuxInt)
16131		if v_0.Op != OpAMD64MOVQconst {
16132			break
16133		}
16134		d := auxIntToInt64(v_0.AuxInt)
16135		v.reset(OpAMD64MOVQconst)
16136		v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
16137		return true
16138	}
16139	return false
16140}
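// SARXL/SARXQ are the BMI2 three-operand shifts. When the count operand
// of a load-combined form turns out to be constant, there is no benefit
// over the classic encoding, so it is rewritten back into a SARxconst fed
// by a plain MOVx load.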
16141func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
16142	v_2 := v.Args[2]
16143	v_1 := v.Args[1]
16144	v_0 := v.Args[0]
16145	b := v.Block
16146	typ := &b.Func.Config.Types
16147	// match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
16148	// result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
16149	for {
16150		off := auxIntToInt32(v.AuxInt)
16151		sym := auxToSym(v.Aux)
16152		ptr := v_0
16153		if v_1.Op != OpAMD64MOVLconst {
16154			break
16155		}
16156		c := auxIntToInt32(v_1.AuxInt)
16157		mem := v_2
16158		v.reset(OpAMD64SARLconst)
16159		v.AuxInt = int8ToAuxInt(int8(c & 31))
16160		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
16161		v0.AuxInt = int32ToAuxInt(off)
16162		v0.Aux = symToAux(sym)
16163		v0.AddArg2(ptr, mem)
16164		v.AddArg(v0)
16165		return true
16166	}
16167	return false
16168}
16169func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
16170	v_2 := v.Args[2]
16171	v_1 := v.Args[1]
16172	v_0 := v.Args[0]
16173	b := v.Block
16174	typ := &b.Func.Config.Types
16175	// match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
16176	// result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
16177	for {
16178		off := auxIntToInt32(v.AuxInt)
16179		sym := auxToSym(v.Aux)
16180		ptr := v_0
16181		if v_1.Op != OpAMD64MOVQconst {
16182			break
16183		}
16184		c := auxIntToInt64(v_1.AuxInt)
16185		mem := v_2
16186		v.reset(OpAMD64SARQconst)
16187		v.AuxInt = int8ToAuxInt(int8(c & 63))
16188		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16189		v0.AuxInt = int32ToAuxInt(off)
16190		v0.Aux = symToAux(sym)
16191		v0.AddArg2(ptr, mem)
16192		v.AddArg(v0)
16193		return true
16194	}
16195	// match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
16196	// result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
16197	for {
16198		off := auxIntToInt32(v.AuxInt)
16199		sym := auxToSym(v.Aux)
16200		ptr := v_0
16201		if v_1.Op != OpAMD64MOVLconst {
16202			break
16203		}
16204		c := auxIntToInt32(v_1.AuxInt)
16205		mem := v_2
16206		v.reset(OpAMD64SARQconst)
16207		v.AuxInt = int8ToAuxInt(int8(c & 63))
16208		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16209		v0.AuxInt = int32ToAuxInt(off)
16210		v0.Aux = symToAux(sym)
16211		v0.AddArg2(ptr, mem)
16212		v.AddArg(v0)
16213		return true
16214	}
16215	return false
16216}
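// SBBx x, x computes x - x - CF = -CF, so the carrymask ops produce 0 or
// -1 depending solely on the carry flag. Against a known flags value the
// result is therefore a constant: the unsigned-less-than outcomes
// (FlagLT_ULT, FlagGT_ULT) set carry and give -1, the rest give 0.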
16217func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
16218	v_0 := v.Args[0]
16219	// match: (SBBLcarrymask (FlagEQ))
16220	// result: (MOVLconst [0])
16221	for {
16222		if v_0.Op != OpAMD64FlagEQ {
16223			break
16224		}
16225		v.reset(OpAMD64MOVLconst)
16226		v.AuxInt = int32ToAuxInt(0)
16227		return true
16228	}
16229	// match: (SBBLcarrymask (FlagLT_ULT))
16230	// result: (MOVLconst [-1])
16231	for {
16232		if v_0.Op != OpAMD64FlagLT_ULT {
16233			break
16234		}
16235		v.reset(OpAMD64MOVLconst)
16236		v.AuxInt = int32ToAuxInt(-1)
16237		return true
16238	}
16239	// match: (SBBLcarrymask (FlagLT_UGT))
16240	// result: (MOVLconst [0])
16241	for {
16242		if v_0.Op != OpAMD64FlagLT_UGT {
16243			break
16244		}
16245		v.reset(OpAMD64MOVLconst)
16246		v.AuxInt = int32ToAuxInt(0)
16247		return true
16248	}
16249	// match: (SBBLcarrymask (FlagGT_ULT))
16250	// result: (MOVLconst [-1])
16251	for {
16252		if v_0.Op != OpAMD64FlagGT_ULT {
16253			break
16254		}
16255		v.reset(OpAMD64MOVLconst)
16256		v.AuxInt = int32ToAuxInt(-1)
16257		return true
16258	}
16259	// match: (SBBLcarrymask (FlagGT_UGT))
16260	// result: (MOVLconst [0])
16261	for {
16262		if v_0.Op != OpAMD64FlagGT_UGT {
16263			break
16264		}
16265		v.reset(OpAMD64MOVLconst)
16266		v.AuxInt = int32ToAuxInt(0)
16267		return true
16268	}
16269	return false
16270}
16271func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
16272	v_2 := v.Args[2]
16273	v_1 := v.Args[1]
16274	v_0 := v.Args[0]
16275	// match: (SBBQ x (MOVQconst [c]) borrow)
16276	// cond: is32Bit(c)
16277	// result: (SBBQconst x [int32(c)] borrow)
16278	for {
16279		x := v_0
16280		if v_1.Op != OpAMD64MOVQconst {
16281			break
16282		}
16283		c := auxIntToInt64(v_1.AuxInt)
16284		borrow := v_2
16285		if !(is32Bit(c)) {
16286			break
16287		}
16288		v.reset(OpAMD64SBBQconst)
16289		v.AuxInt = int32ToAuxInt(int32(c))
16290		v.AddArg2(x, borrow)
16291		return true
16292	}
16293	// match: (SBBQ x y (FlagEQ))
16294	// result: (SUBQborrow x y)
16295	for {
16296		x := v_0
16297		y := v_1
16298		if v_2.Op != OpAMD64FlagEQ {
16299			break
16300		}
16301		v.reset(OpAMD64SUBQborrow)
16302		v.AddArg2(x, y)
16303		return true
16304	}
16305	return false
16306}
16307func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
16308	v_0 := v.Args[0]
16309	// match: (SBBQcarrymask (FlagEQ))
16310	// result: (MOVQconst [0])
16311	for {
16312		if v_0.Op != OpAMD64FlagEQ {
16313			break
16314		}
16315		v.reset(OpAMD64MOVQconst)
16316		v.AuxInt = int64ToAuxInt(0)
16317		return true
16318	}
16319	// match: (SBBQcarrymask (FlagLT_ULT))
16320	// result: (MOVQconst [-1])
16321	for {
16322		if v_0.Op != OpAMD64FlagLT_ULT {
16323			break
16324		}
16325		v.reset(OpAMD64MOVQconst)
16326		v.AuxInt = int64ToAuxInt(-1)
16327		return true
16328	}
16329	// match: (SBBQcarrymask (FlagLT_UGT))
16330	// result: (MOVQconst [0])
16331	for {
16332		if v_0.Op != OpAMD64FlagLT_UGT {
16333			break
16334		}
16335		v.reset(OpAMD64MOVQconst)
16336		v.AuxInt = int64ToAuxInt(0)
16337		return true
16338	}
16339	// match: (SBBQcarrymask (FlagGT_ULT))
16340	// result: (MOVQconst [-1])
16341	for {
16342		if v_0.Op != OpAMD64FlagGT_ULT {
16343			break
16344		}
16345		v.reset(OpAMD64MOVQconst)
16346		v.AuxInt = int64ToAuxInt(-1)
16347		return true
16348	}
16349	// match: (SBBQcarrymask (FlagGT_UGT))
16350	// result: (MOVQconst [0])
16351	for {
16352		if v_0.Op != OpAMD64FlagGT_UGT {
16353			break
16354		}
16355		v.reset(OpAMD64MOVQconst)
16356		v.AuxInt = int64ToAuxInt(0)
16357		return true
16358	}
16359	return false
16360}
16361func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
16362	v_1 := v.Args[1]
16363	v_0 := v.Args[0]
16364	// match: (SBBQconst x [c] (FlagEQ))
16365	// result: (SUBQconstborrow x [c])
16366	for {
16367		c := auxIntToInt32(v.AuxInt)
16368		x := v_0
16369		if v_1.Op != OpAMD64FlagEQ {
16370			break
16371		}
16372		v.reset(OpAMD64SUBQconstborrow)
16373		v.AuxInt = int32ToAuxInt(c)
16374		v.AddArg(x)
16375		return true
16376	}
16377	return false
16378}
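// SETcc rules: a SETcc applied to InvertFlags becomes the SETcc of the
// swapped comparison (SETA <-> SETB, SETAE <-> SETBE, ...), and a SETcc
// of a known flags value folds to the constant 0 or 1 that the condition
// evaluates to.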
16379func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
16380	v_0 := v.Args[0]
16381	// match: (SETA (InvertFlags x))
16382	// result: (SETB x)
16383	for {
16384		if v_0.Op != OpAMD64InvertFlags {
16385			break
16386		}
16387		x := v_0.Args[0]
16388		v.reset(OpAMD64SETB)
16389		v.AddArg(x)
16390		return true
16391	}
16392	// match: (SETA (FlagEQ))
16393	// result: (MOVLconst [0])
16394	for {
16395		if v_0.Op != OpAMD64FlagEQ {
16396			break
16397		}
16398		v.reset(OpAMD64MOVLconst)
16399		v.AuxInt = int32ToAuxInt(0)
16400		return true
16401	}
16402	// match: (SETA (FlagLT_ULT))
16403	// result: (MOVLconst [0])
16404	for {
16405		if v_0.Op != OpAMD64FlagLT_ULT {
16406			break
16407		}
16408		v.reset(OpAMD64MOVLconst)
16409		v.AuxInt = int32ToAuxInt(0)
16410		return true
16411	}
16412	// match: (SETA (FlagLT_UGT))
16413	// result: (MOVLconst [1])
16414	for {
16415		if v_0.Op != OpAMD64FlagLT_UGT {
16416			break
16417		}
16418		v.reset(OpAMD64MOVLconst)
16419		v.AuxInt = int32ToAuxInt(1)
16420		return true
16421	}
16422	// match: (SETA (FlagGT_ULT))
16423	// result: (MOVLconst [0])
16424	for {
16425		if v_0.Op != OpAMD64FlagGT_ULT {
16426			break
16427		}
16428		v.reset(OpAMD64MOVLconst)
16429		v.AuxInt = int32ToAuxInt(0)
16430		return true
16431	}
16432	// match: (SETA (FlagGT_UGT))
16433	// result: (MOVLconst [1])
16434	for {
16435		if v_0.Op != OpAMD64FlagGT_UGT {
16436			break
16437		}
16438		v.reset(OpAMD64MOVLconst)
16439		v.AuxInt = int32ToAuxInt(1)
16440		return true
16441	}
16442	return false
16443}
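// TEST x x ANDs a value with itself, which always clears the carry flag,
// so unsigned comparisons of x&x against zero are decided statically:
// SETAE (carry clear) of such a test is always true, and SETB (carry
// set), handled further below, is always false.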
16444func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
16445	v_0 := v.Args[0]
16446	// match: (SETAE (TESTQ x x))
16447	// result: (ConstBool [true])
16448	for {
16449		if v_0.Op != OpAMD64TESTQ {
16450			break
16451		}
16452		x := v_0.Args[1]
16453		if x != v_0.Args[0] {
16454			break
16455		}
16456		v.reset(OpConstBool)
16457		v.AuxInt = boolToAuxInt(true)
16458		return true
16459	}
16460	// match: (SETAE (TESTL x x))
16461	// result: (ConstBool [true])
16462	for {
16463		if v_0.Op != OpAMD64TESTL {
16464			break
16465		}
16466		x := v_0.Args[1]
16467		if x != v_0.Args[0] {
16468			break
16469		}
16470		v.reset(OpConstBool)
16471		v.AuxInt = boolToAuxInt(true)
16472		return true
16473	}
16474	// match: (SETAE (TESTW x x))
16475	// result: (ConstBool [true])
16476	for {
16477		if v_0.Op != OpAMD64TESTW {
16478			break
16479		}
16480		x := v_0.Args[1]
16481		if x != v_0.Args[0] {
16482			break
16483		}
16484		v.reset(OpConstBool)
16485		v.AuxInt = boolToAuxInt(true)
16486		return true
16487	}
16488	// match: (SETAE (TESTB x x))
16489	// result: (ConstBool [true])
16490	for {
16491		if v_0.Op != OpAMD64TESTB {
16492			break
16493		}
16494		x := v_0.Args[1]
16495		if x != v_0.Args[0] {
16496			break
16497		}
16498		v.reset(OpConstBool)
16499		v.AuxInt = boolToAuxInt(true)
16500		return true
16501	}
16502	// match: (SETAE (InvertFlags x))
16503	// result: (SETBE x)
16504	for {
16505		if v_0.Op != OpAMD64InvertFlags {
16506			break
16507		}
16508		x := v_0.Args[0]
16509		v.reset(OpAMD64SETBE)
16510		v.AddArg(x)
16511		return true
16512	}
16513	// match: (SETAE (FlagEQ))
16514	// result: (MOVLconst [1])
16515	for {
16516		if v_0.Op != OpAMD64FlagEQ {
16517			break
16518		}
16519		v.reset(OpAMD64MOVLconst)
16520		v.AuxInt = int32ToAuxInt(1)
16521		return true
16522	}
16523	// match: (SETAE (FlagLT_ULT))
16524	// result: (MOVLconst [0])
16525	for {
16526		if v_0.Op != OpAMD64FlagLT_ULT {
16527			break
16528		}
16529		v.reset(OpAMD64MOVLconst)
16530		v.AuxInt = int32ToAuxInt(0)
16531		return true
16532	}
16533	// match: (SETAE (FlagLT_UGT))
16534	// result: (MOVLconst [1])
16535	for {
16536		if v_0.Op != OpAMD64FlagLT_UGT {
16537			break
16538		}
16539		v.reset(OpAMD64MOVLconst)
16540		v.AuxInt = int32ToAuxInt(1)
16541		return true
16542	}
16543	// match: (SETAE (FlagGT_ULT))
16544	// result: (MOVLconst [0])
16545	for {
16546		if v_0.Op != OpAMD64FlagGT_ULT {
16547			break
16548		}
16549		v.reset(OpAMD64MOVLconst)
16550		v.AuxInt = int32ToAuxInt(0)
16551		return true
16552	}
16553	// match: (SETAE (FlagGT_UGT))
16554	// result: (MOVLconst [1])
16555	for {
16556		if v_0.Op != OpAMD64FlagGT_UGT {
16557			break
16558		}
16559		v.reset(OpAMD64MOVLconst)
16560		v.AuxInt = int32ToAuxInt(1)
16561		return true
16562	}
16563	return false
16564}
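// The SET*store variants write the condition byte straight to memory, so
// a known flags value folds into a MOVBstore of the constant 0 or 1
// instead of materializing a SETcc register result.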
16565func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
16566	v_2 := v.Args[2]
16567	v_1 := v.Args[1]
16568	v_0 := v.Args[0]
16569	b := v.Block
16570	typ := &b.Func.Config.Types
16571	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
16572	// result: (SETBEstore [off] {sym} ptr x mem)
16573	for {
16574		off := auxIntToInt32(v.AuxInt)
16575		sym := auxToSym(v.Aux)
16576		ptr := v_0
16577		if v_1.Op != OpAMD64InvertFlags {
16578			break
16579		}
16580		x := v_1.Args[0]
16581		mem := v_2
16582		v.reset(OpAMD64SETBEstore)
16583		v.AuxInt = int32ToAuxInt(off)
16584		v.Aux = symToAux(sym)
16585		v.AddArg3(ptr, x, mem)
16586		return true
16587	}
16588	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
16589	// cond: is32Bit(int64(off1)+int64(off2))
16590	// result: (SETAEstore [off1+off2] {sym} base val mem)
16591	for {
16592		off1 := auxIntToInt32(v.AuxInt)
16593		sym := auxToSym(v.Aux)
16594		if v_0.Op != OpAMD64ADDQconst {
16595			break
16596		}
16597		off2 := auxIntToInt32(v_0.AuxInt)
16598		base := v_0.Args[0]
16599		val := v_1
16600		mem := v_2
16601		if !(is32Bit(int64(off1) + int64(off2))) {
16602			break
16603		}
16604		v.reset(OpAMD64SETAEstore)
16605		v.AuxInt = int32ToAuxInt(off1 + off2)
16606		v.Aux = symToAux(sym)
16607		v.AddArg3(base, val, mem)
16608		return true
16609	}
16610	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
16611	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
16612	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
16613	for {
16614		off1 := auxIntToInt32(v.AuxInt)
16615		sym1 := auxToSym(v.Aux)
16616		if v_0.Op != OpAMD64LEAQ {
16617			break
16618		}
16619		off2 := auxIntToInt32(v_0.AuxInt)
16620		sym2 := auxToSym(v_0.Aux)
16621		base := v_0.Args[0]
16622		val := v_1
16623		mem := v_2
16624		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16625			break
16626		}
16627		v.reset(OpAMD64SETAEstore)
16628		v.AuxInt = int32ToAuxInt(off1 + off2)
16629		v.Aux = symToAux(mergeSym(sym1, sym2))
16630		v.AddArg3(base, val, mem)
16631		return true
16632	}
16633	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
16634	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16635	for {
16636		off := auxIntToInt32(v.AuxInt)
16637		sym := auxToSym(v.Aux)
16638		ptr := v_0
16639		if v_1.Op != OpAMD64FlagEQ {
16640			break
16641		}
16642		mem := v_2
16643		v.reset(OpAMD64MOVBstore)
16644		v.AuxInt = int32ToAuxInt(off)
16645		v.Aux = symToAux(sym)
16646		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16647		v0.AuxInt = int32ToAuxInt(1)
16648		v.AddArg3(ptr, v0, mem)
16649		return true
16650	}
16651	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
16652	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16653	for {
16654		off := auxIntToInt32(v.AuxInt)
16655		sym := auxToSym(v.Aux)
16656		ptr := v_0
16657		if v_1.Op != OpAMD64FlagLT_ULT {
16658			break
16659		}
16660		mem := v_2
16661		v.reset(OpAMD64MOVBstore)
16662		v.AuxInt = int32ToAuxInt(off)
16663		v.Aux = symToAux(sym)
16664		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16665		v0.AuxInt = int32ToAuxInt(0)
16666		v.AddArg3(ptr, v0, mem)
16667		return true
16668	}
16669	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
16670	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16671	for {
16672		off := auxIntToInt32(v.AuxInt)
16673		sym := auxToSym(v.Aux)
16674		ptr := v_0
16675		if v_1.Op != OpAMD64FlagLT_UGT {
16676			break
16677		}
16678		mem := v_2
16679		v.reset(OpAMD64MOVBstore)
16680		v.AuxInt = int32ToAuxInt(off)
16681		v.Aux = symToAux(sym)
16682		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16683		v0.AuxInt = int32ToAuxInt(1)
16684		v.AddArg3(ptr, v0, mem)
16685		return true
16686	}
16687	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
16688	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
16689	for {
16690		off := auxIntToInt32(v.AuxInt)
16691		sym := auxToSym(v.Aux)
16692		ptr := v_0
16693		if v_1.Op != OpAMD64FlagGT_ULT {
16694			break
16695		}
16696		mem := v_2
16697		v.reset(OpAMD64MOVBstore)
16698		v.AuxInt = int32ToAuxInt(off)
16699		v.Aux = symToAux(sym)
16700		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16701		v0.AuxInt = int32ToAuxInt(0)
16702		v.AddArg3(ptr, v0, mem)
16703		return true
16704	}
16705	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
16706	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
16707	for {
16708		off := auxIntToInt32(v.AuxInt)
16709		sym := auxToSym(v.Aux)
16710		ptr := v_0
16711		if v_1.Op != OpAMD64FlagGT_UGT {
16712			break
16713		}
16714		mem := v_2
16715		v.reset(OpAMD64MOVBstore)
16716		v.AuxInt = int32ToAuxInt(off)
16717		v.Aux = symToAux(sym)
16718		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16719		v0.AuxInt = int32ToAuxInt(1)
16720		v.AddArg3(ptr, v0, mem)
16721		return true
16722	}
16723	return false
16724}
func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
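// rewriteValueAMD64_OpAMD64SETB rewrites SETB ("unsigned less than", i.e.
// carry set). TEST of a value against itself is an AND, which always clears
// the carry flag, so (SETB (TEST x x)) is constant false; a bit test of bit 0
// (BTconst [0]) copies bit 0 into the carry flag, so SETB of it is just x&1.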
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
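// rewriteValueAMD64_OpAMD64SETBE rewrites SETBE ("unsigned less than or
// equal"): InvertFlags swaps the comparison operands, turning SETBE into
// SETAE, and statically known flag values fold to constant 0 or 1.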
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
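// rewriteValueAMD64_OpAMD64SETBEstore applies the same rewrites as SETBE to
// the fused set-and-store form, plus the usual folding of ADDQconst/LEAQ
// address arithmetic into the store's offset and symbol.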
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
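// rewriteValueAMD64_OpAMD64SETBstore is the store form of SETB: InvertFlags
// flips it to SETAstore, ADDQconst/LEAQ fold into the address, and known
// flags become constant byte stores.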
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
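// rewriteValueAMD64_OpAMD64SETEQ rewrites SETEQ (equality, ZF set). Most of
// the rules recognize single-bit tests: a TEST against a shifted 1 or a
// power-of-two constant becomes a BT bit-test instruction, and SETEQ of the
// result becomes SETAE, since BT copies the tested bit into the carry flag
// (bit clear means "above or equal").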
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
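		// TEST is commutative, so the generated matcher tries both argument orders.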
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
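	// For a value known to be 0 or 1 (an AND with 1), comparing it to 1 is
	// the same as testing it against 0 with the opposite condition.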
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
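	// A shift pair that isolates a single bit of x (e.g. x<<63>>63, x>>63<<63,
	// or a bare x>>63) TESTed against itself is really a test of that one bit,
	// so it reduces to a constant bit-test (BTconst) of the original x.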
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
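	// BLSR ("reset lowest set bit", x&(x-1)) is a tuple of (value, flags) in
	// SSA and already sets ZF from its result, so a separate TEST of that
	// result against itself is redundant: use the flags half (Select1) directly.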
	// match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
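// rewriteValueAMD64_OpAMD64SETEQstore mirrors the SETEQ rules for the fused
// set-and-store form: bit-test recognition rewrites to SETAEstore, InvertFlags
// is dropped (equality is symmetric under operand swap), address arithmetic
// folds into the aux fields, and known flags become constant byte stores.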
func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
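// rewriteValueAMD64_OpAMD64SETG rewrites SETG (signed greater than):
// InvertFlags turns it into SETL, and known flags fold to constants using
// only the signed half of the flag pseudo-op (the LT_/GT_ prefix).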
func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETG (InvertFlags x))
	// result: (SETL x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
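// rewriteValueAMD64_OpAMD64SETGE rewrites SETGE (signed >=) the same way:
// InvertFlags gives SETLE, FlagEQ and FlagGT_* fold to 1, FlagLT_* to 0.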
func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETGE (InvertFlags x))
	// result: (SETLE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
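// rewriteValueAMD64_OpAMD64SETGEstore is the store form of SETGE, with the
// usual InvertFlags, address-folding, and constant-flag rules.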
func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
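// rewriteValueAMD64_OpAMD64SETGstore is the store form of SETG; only the
// signed ordering matters, so FlagGT_* rules store 1 and FlagEQ/FlagLT_*
// rules store 0.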
func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
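// SETL materializes the signed less-than flag as a 0/1 value. When the
// flags are statically known the result folds directly: of the five flag
// states, only the FlagLT_* states satisfy signed less-than. The
// InvertFlags rule relies on the identity a < b == b > a: testing "less
// than" against flags whose comparison operands were swapped is the same
// as testing "greater than" against the original flags.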
func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETL (InvertFlags x))
	// result: (SETG x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETLE (InvertFlags x))
	// result: (SETGE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETLEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
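// SETLstore combines the same three rewrite families: InvertFlags flips
// it to SETGstore, ADDQconst/LEAQ offsets fold into the aux fields, and
// known flags collapse to a constant MOVBstore. Note the duality with
// SETGEstore above: the two functions map exactly complementary flag
// states to 1, because x < y and x >= y partition the signed outcomes.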
func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETLstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
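// SETNE is the busiest of the flag-materializing ops because "nonzero
// after TEST" is how single-bit probes reach the flags. The rules below
// recognize several spellings of a bit test and canonicalize them to BT,
// whose carry flag is then read with SETB: a TEST against a variable
// 1<<x becomes BTL/BTQ, a TEST against a constant power of two becomes
// BTLconst/BTQconst, and shift pairs that isolate the sign bit or the
// low bit become BT of that fixed bit. In Go source, a hypothetical
//
//	if y&(1<<uint(x)) != 0 { ... }
//
// reaches this function as (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
// and leaves as (SETB (BTQ x y)). The final two rules fold a TEST of a
// BLSRQ/BLSRL result against itself into the zero flag that the BLSR
// instruction already computed.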
func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETNE (TESTBconst [1] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTWconst [1] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETB (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETB (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETB (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETB (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETB (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETEQ (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETEQ (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (InvertFlags x))
	// result: (SETNE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETNE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETNE (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETNE (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
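// SETNEstore repeats the SETNE bit-test rewrites with the byte store
// kept fused: each match produces a SETBstore (or SETEQstore) around the
// same BT/CMP flag computation rather than a separate SETcc and
// MOVBstore. After those come the usual InvertFlags, offset-folding, and
// constant-flag rules; InvertFlags is a no-op here because inequality,
// like equality, is symmetric in the comparison's operands.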
func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETNEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
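// SHLL and the other variable-count shifts exploit the x86 rule that the
// hardware masks a 32-bit shift count to its low 5 bits. Arithmetic on
// the count that cannot change those 5 bits (adding a multiple of 32, or
// ANDing with a mask whose low 5 bits are all set) is therefore dead,
// and the rules strip it. As a hypothetical example, source like
//
//	x << (s & 31)
//
// needs no explicit AND instruction: the (ANDQconst [31] s) matches the
// c&31 == 31 rule and the shift consumes s directly. The final rule
// merges a loaded operand into SHLXLload on CPUs selected by
// buildcfg.GOAMD64 >= 3, where the BMI2 SHLXL form can take the value
// being shifted straight from memory.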
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLL x (MOVQconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
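// SHLLconst handles the constant-count leftovers: a shift by 0 is the
// identity, a shift of a constant folds at compile time, and the
// (SHLLconst [1] (SHRLconst [1] x)) pair, which only clears the lowest
// bit, becomes (ANDLconst [-2] x), since -2 is the 32-bit mask with
// every bit set except bit 0.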
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLLconst [1] (SHRLconst [1] x))
	// result: (ANDLconst [-2] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLLconst [d] (MOVLconst [c]))
	// result: (MOVLconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c << uint64(d))
		return true
	}
	return false
}
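// SHLQ is the 64-bit analogue of SHLL: the hardware masks the count to
// its low 6 bits, so the dead-arithmetic rules test c&63 instead of
// c&31, and both 64-bit and 32-bit count expressions (ADDQconst and
// ADDLconst, ANDQconst and ANDLconst, plus their NEG wrappers) are
// covered.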
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLQ x (MOVQconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
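// rewriteValueAMD64_OpAMD64SHLQconst folds constant 64-bit shifts: a shift
// by 0 is the identity, and shifting a constant operand is evaluated at
// compile time into a MOVQconst.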
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// result: (ANDQconst [-2] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLQconst [d] (MOVQconst [c]))
	// result: (MOVQconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}
	// match: (SHLQconst [d] (MOVLconst [c]))
	// result: (MOVQconst [int64(c) << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
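// SHLXLload and SHLXQload are shifts fused with a load, produced above only
// when buildcfg.GOAMD64 >= 3 (the v3 microarchitecture level, which includes
// the BMI2 SHLX instruction). If the count later turns out to be a constant,
// the rules below degrade the fused form back to a plain shift-by-constant
// of the loaded value.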
func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
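// rewriteValueAMD64_OpAMD64SHRB handles logical byte shifts. The hardware
// masks the count to 5 bits, so for an 8-bit operand a count with c&31 < 8
// becomes a SHRBconst, while c&31 >= 8 shifts every bit out and the result
// is the constant zero.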
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
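// rewriteValueAMD64_OpAMD64SHRL mirrors the SHLL rules for logical right
// shifts: 32-bit shifts use only the low 5 bits of the count, so constant
// counts fold into SHRLconst and count arithmetic that cannot change
// count&31 is removed.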
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRL x (MOVQconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// result: (ANDLconst [0x7fffffff] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
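// rewriteValueAMD64_OpAMD64SHRQ applies the same count-masking reasoning as
// SHLQ to logical right shifts: only count&63 matters for a 64-bit shift, so
// the rules below fold constants and strip redundant count arithmetic.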
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRQ x (MOVQconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// result: (BTRQconst [63] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(63)
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
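// rewriteValueAMD64_OpAMD64SHRW parallels SHRB for 16-bit operands: counts
// with c&31 < 16 fold into SHRWconst, and larger masked counts shift every
// bit out, yielding constant zero.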
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
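// SHRXLload and SHRXQload (below) undo the load-merged shift when the count
// is discovered to be a constant, reverting to an ordinary shift-by-constant
// of the loaded value, just as the SHLX load forms do above.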
func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
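// rewriteValueAMD64_OpAMD64SUBL canonicalizes 32-bit subtraction: constants
// move into SUBLconst (negating via NEGL when the constant is on the left),
// x-x folds to 0, and a subtrahend that is a dying load merges into SUBLload.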
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBL x (MOVLconst [c]))
	// result: (SUBLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// result: (ADDLconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
}
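// rewriteValueAMD64_OpAMD64SUBLload folds address arithmetic (ADDQconst,
// LEAQ) into the instruction's offset, guarded by is32Bit so the combined
// displacement still fits the encoding, and forwards a just-stored float bit
// pattern as an integer operand via MOVLf2i instead of reloading it.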
func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (SUBL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	return false
}
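// rewriteValueAMD64_OpAMD64SUBQconst rewrites subtraction of a constant as
// addition of its negation. The c != -(1<<31) guard matters because negating
// math.MinInt32 overflows the int32 auxiliary field, so that one case is
// left alone.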
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// result: (MOVQconst [d-int64(c)])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d - int64(c))
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(int64(-c)-int64(d))
	// result: (ADDQconst [-c-d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(-c) - int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
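// The TEST rewrites below iterate a two-pass loop that swaps v_0 and v_1,
// since TEST is commutative and either operand order may match. A TEST of a
// load against itself becomes a compare-with-zero directly from memory
// (CMPBconstload and friends), emitted in the load's block.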
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTB (MOVLconst [c]) x)
	// result: (TESTBconst [int8(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTBconst)
			v.AuxInt = int8ToAuxInt(int8(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVBload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTL (MOVLconst [c]) x)
	// result: (TESTLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDLload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTL)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64TESTQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDQload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTQ)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTW (MOVLconst [c]) x)
	// result: (TESTWconst [int16(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTWconst)
			v.AuxInt = int16ToAuxInt(int16(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVWload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	return false
}
22860func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
22861	v_2 := v.Args[2]
22862	v_1 := v.Args[1]
22863	v_0 := v.Args[0]
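	// Note: this rule, and the analogous XADDQlock/XCHGL/XCHGQ rules that
	// follow, fold a constant pointer adjustment (ADDQconst), or for XCHG
	// also a static address (LEAQ), into the instruction's displacement.
	// The is32Bit(int64(off1)+int64(off2)) guard does the addition in 64
	// bits first, because the combined offset must still fit the signed
	// 32-bit displacement encoding.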
22864	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
22865	// cond: is32Bit(int64(off1)+int64(off2))
22866	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
22867	for {
22868		off1 := auxIntToInt32(v.AuxInt)
22869		sym := auxToSym(v.Aux)
22870		val := v_0
22871		if v_1.Op != OpAMD64ADDQconst {
22872			break
22873		}
22874		off2 := auxIntToInt32(v_1.AuxInt)
22875		ptr := v_1.Args[0]
22876		mem := v_2
22877		if !(is32Bit(int64(off1) + int64(off2))) {
22878			break
22879		}
22880		v.reset(OpAMD64XADDLlock)
22881		v.AuxInt = int32ToAuxInt(off1 + off2)
22882		v.Aux = symToAux(sym)
22883		v.AddArg3(val, ptr, mem)
22884		return true
22885	}
22886	return false
22887}
22888func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
22889	v_2 := v.Args[2]
22890	v_1 := v.Args[1]
22891	v_0 := v.Args[0]
22892	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
22893	// cond: is32Bit(int64(off1)+int64(off2))
22894	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
22895	for {
22896		off1 := auxIntToInt32(v.AuxInt)
22897		sym := auxToSym(v.Aux)
22898		val := v_0
22899		if v_1.Op != OpAMD64ADDQconst {
22900			break
22901		}
22902		off2 := auxIntToInt32(v_1.AuxInt)
22903		ptr := v_1.Args[0]
22904		mem := v_2
22905		if !(is32Bit(int64(off1) + int64(off2))) {
22906			break
22907		}
22908		v.reset(OpAMD64XADDQlock)
22909		v.AuxInt = int32ToAuxInt(off1 + off2)
22910		v.Aux = symToAux(sym)
22911		v.AddArg3(val, ptr, mem)
22912		return true
22913	}
22914	return false
22915}
22916func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
22917	v_2 := v.Args[2]
22918	v_1 := v.Args[1]
22919	v_0 := v.Args[0]
22920	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
22921	// cond: is32Bit(int64(off1)+int64(off2))
22922	// result: (XCHGL [off1+off2] {sym} val ptr mem)
22923	for {
22924		off1 := auxIntToInt32(v.AuxInt)
22925		sym := auxToSym(v.Aux)
22926		val := v_0
22927		if v_1.Op != OpAMD64ADDQconst {
22928			break
22929		}
22930		off2 := auxIntToInt32(v_1.AuxInt)
22931		ptr := v_1.Args[0]
22932		mem := v_2
22933		if !(is32Bit(int64(off1) + int64(off2))) {
22934			break
22935		}
22936		v.reset(OpAMD64XCHGL)
22937		v.AuxInt = int32ToAuxInt(off1 + off2)
22938		v.Aux = symToAux(sym)
22939		v.AddArg3(val, ptr, mem)
22940		return true
22941	}
22942	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
22943	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
22944	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
22945	for {
22946		off1 := auxIntToInt32(v.AuxInt)
22947		sym1 := auxToSym(v.Aux)
22948		val := v_0
22949		if v_1.Op != OpAMD64LEAQ {
22950			break
22951		}
22952		off2 := auxIntToInt32(v_1.AuxInt)
22953		sym2 := auxToSym(v_1.Aux)
22954		ptr := v_1.Args[0]
22955		mem := v_2
22956		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
22957			break
22958		}
22959		v.reset(OpAMD64XCHGL)
22960		v.AuxInt = int32ToAuxInt(off1 + off2)
22961		v.Aux = symToAux(mergeSym(sym1, sym2))
22962		v.AddArg3(val, ptr, mem)
22963		return true
22964	}
22965	return false
22966}
22967func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
22968	v_2 := v.Args[2]
22969	v_1 := v.Args[1]
22970	v_0 := v.Args[0]
22971	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
22972	// cond: is32Bit(int64(off1)+int64(off2))
22973	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
22974	for {
22975		off1 := auxIntToInt32(v.AuxInt)
22976		sym := auxToSym(v.Aux)
22977		val := v_0
22978		if v_1.Op != OpAMD64ADDQconst {
22979			break
22980		}
22981		off2 := auxIntToInt32(v_1.AuxInt)
22982		ptr := v_1.Args[0]
22983		mem := v_2
22984		if !(is32Bit(int64(off1) + int64(off2))) {
22985			break
22986		}
22987		v.reset(OpAMD64XCHGQ)
22988		v.AuxInt = int32ToAuxInt(off1 + off2)
22989		v.Aux = symToAux(sym)
22990		v.AddArg3(val, ptr, mem)
22991		return true
22992	}
22993	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
22994	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
22995	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
22996	for {
22997		off1 := auxIntToInt32(v.AuxInt)
22998		sym1 := auxToSym(v.Aux)
22999		val := v_0
23000		if v_1.Op != OpAMD64LEAQ {
23001			break
23002		}
23003		off2 := auxIntToInt32(v_1.AuxInt)
23004		sym2 := auxToSym(v_1.Aux)
23005		ptr := v_1.Args[0]
23006		mem := v_2
23007		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
23008			break
23009		}
23010		v.reset(OpAMD64XCHGQ)
23011		v.AuxInt = int32ToAuxInt(off1 + off2)
23012		v.Aux = symToAux(mergeSym(sym1, sym2))
23013		v.AddArg3(val, ptr, mem)
23014		return true
23015	}
23016	return false
23017}
23018func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
23019	v_1 := v.Args[1]
23020	v_0 := v.Args[0]
23021	// match: (XORL (SHLL (MOVLconst [1]) y) x)
23022	// result: (BTCL x y)
23023	for {
23024		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23025			if v_0.Op != OpAMD64SHLL {
23026				continue
23027			}
23028			y := v_0.Args[1]
23029			v_0_0 := v_0.Args[0]
23030			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
23031				continue
23032			}
23033			x := v_1
23034			v.reset(OpAMD64BTCL)
23035			v.AddArg2(x, y)
23036			return true
23037		}
23038		break
23039	}
23040	// match: (XORL x (MOVLconst [c]))
23041	// result: (XORLconst [c] x)
23042	for {
23043		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23044			x := v_0
23045			if v_1.Op != OpAMD64MOVLconst {
23046				continue
23047			}
23048			c := auxIntToInt32(v_1.AuxInt)
23049			v.reset(OpAMD64XORLconst)
23050			v.AuxInt = int32ToAuxInt(c)
23051			v.AddArg(x)
23052			return true
23053		}
23054		break
23055	}
23056	// match: (XORL x x)
23057	// result: (MOVLconst [0])
23058	for {
23059		x := v_0
23060		if x != v_1 {
23061			break
23062		}
23063		v.reset(OpAMD64MOVLconst)
23064		v.AuxInt = int32ToAuxInt(0)
23065		return true
23066	}
23067	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
23068	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
23069	// result: (XORLload x [off] {sym} ptr mem)
23070	for {
23071		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23072			x := v_0
23073			l := v_1
23074			if l.Op != OpAMD64MOVLload {
23075				continue
23076			}
23077			off := auxIntToInt32(l.AuxInt)
23078			sym := auxToSym(l.Aux)
23079			mem := l.Args[1]
23080			ptr := l.Args[0]
23081			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23082				continue
23083			}
23084			v.reset(OpAMD64XORLload)
23085			v.AuxInt = int32ToAuxInt(off)
23086			v.Aux = symToAux(sym)
23087			v.AddArg3(x, ptr, mem)
23088			return true
23089		}
23090		break
23091	}
23092	// match: (XORL x (ADDLconst [-1] x))
23093	// cond: buildcfg.GOAMD64 >= 3
23094	// result: (BLSMSKL x)
23095	for {
23096		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23097			x := v_0
23098			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23099				continue
23100			}
23101			v.reset(OpAMD64BLSMSKL)
23102			v.AddArg(x)
23103			return true
23104		}
23105		break
23106	}
23107	return false
23108}
23109func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
23110	v_0 := v.Args[0]
23111	// match: (XORLconst [1] (SETNE x))
23112	// result: (SETEQ x)
23113	for {
23114		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
23115			break
23116		}
23117		x := v_0.Args[0]
23118		v.reset(OpAMD64SETEQ)
23119		v.AddArg(x)
23120		return true
23121	}
23122	// match: (XORLconst [1] (SETEQ x))
23123	// result: (SETNE x)
23124	for {
23125		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
23126			break
23127		}
23128		x := v_0.Args[0]
23129		v.reset(OpAMD64SETNE)
23130		v.AddArg(x)
23131		return true
23132	}
23133	// match: (XORLconst [1] (SETL x))
23134	// result: (SETGE x)
23135	for {
23136		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
23137			break
23138		}
23139		x := v_0.Args[0]
23140		v.reset(OpAMD64SETGE)
23141		v.AddArg(x)
23142		return true
23143	}
23144	// match: (XORLconst [1] (SETGE x))
23145	// result: (SETL x)
23146	for {
23147		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
23148			break
23149		}
23150		x := v_0.Args[0]
23151		v.reset(OpAMD64SETL)
23152		v.AddArg(x)
23153		return true
23154	}
23155	// match: (XORLconst [1] (SETLE x))
23156	// result: (SETG x)
23157	for {
23158		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
23159			break
23160		}
23161		x := v_0.Args[0]
23162		v.reset(OpAMD64SETG)
23163		v.AddArg(x)
23164		return true
23165	}
23166	// match: (XORLconst [1] (SETG x))
23167	// result: (SETLE x)
23168	for {
23169		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
23170			break
23171		}
23172		x := v_0.Args[0]
23173		v.reset(OpAMD64SETLE)
23174		v.AddArg(x)
23175		return true
23176	}
23177	// match: (XORLconst [1] (SETB x))
23178	// result: (SETAE x)
23179	for {
23180		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
23181			break
23182		}
23183		x := v_0.Args[0]
23184		v.reset(OpAMD64SETAE)
23185		v.AddArg(x)
23186		return true
23187	}
23188	// match: (XORLconst [1] (SETAE x))
23189	// result: (SETB x)
23190	for {
23191		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
23192			break
23193		}
23194		x := v_0.Args[0]
23195		v.reset(OpAMD64SETB)
23196		v.AddArg(x)
23197		return true
23198	}
23199	// match: (XORLconst [1] (SETBE x))
23200	// result: (SETA x)
23201	for {
23202		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
23203			break
23204		}
23205		x := v_0.Args[0]
23206		v.reset(OpAMD64SETA)
23207		v.AddArg(x)
23208		return true
23209	}
23210	// match: (XORLconst [1] (SETA x))
23211	// result: (SETBE x)
23212	for {
23213		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
23214			break
23215		}
23216		x := v_0.Args[0]
23217		v.reset(OpAMD64SETBE)
23218		v.AddArg(x)
23219		return true
23220	}
23221	// match: (XORLconst [c] (XORLconst [d] x))
23222	// result: (XORLconst [c ^ d] x)
23223	for {
23224		c := auxIntToInt32(v.AuxInt)
23225		if v_0.Op != OpAMD64XORLconst {
23226			break
23227		}
23228		d := auxIntToInt32(v_0.AuxInt)
23229		x := v_0.Args[0]
23230		v.reset(OpAMD64XORLconst)
23231		v.AuxInt = int32ToAuxInt(c ^ d)
23232		v.AddArg(x)
23233		return true
23234	}
23235	// match: (XORLconst [c] x)
23236	// cond: c==0
23237	// result: x
23238	for {
23239		c := auxIntToInt32(v.AuxInt)
23240		x := v_0
23241		if !(c == 0) {
23242			break
23243		}
23244		v.copyOf(x)
23245		return true
23246	}
23247	// match: (XORLconst [c] (MOVLconst [d]))
23248	// result: (MOVLconst [c^d])
23249	for {
23250		c := auxIntToInt32(v.AuxInt)
23251		if v_0.Op != OpAMD64MOVLconst {
23252			break
23253		}
23254		d := auxIntToInt32(v_0.AuxInt)
23255		v.reset(OpAMD64MOVLconst)
23256		v.AuxInt = int32ToAuxInt(c ^ d)
23257		return true
23258	}
23259	return false
23260}
23261func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
23262	v_1 := v.Args[1]
23263	v_0 := v.Args[0]
23264	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
23265	// cond: ValAndOff(valoff1).canAdd32(off2)
23266	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
23267	for {
23268		valoff1 := auxIntToValAndOff(v.AuxInt)
23269		sym := auxToSym(v.Aux)
23270		if v_0.Op != OpAMD64ADDQconst {
23271			break
23272		}
23273		off2 := auxIntToInt32(v_0.AuxInt)
23274		base := v_0.Args[0]
23275		mem := v_1
23276		if !(ValAndOff(valoff1).canAdd32(off2)) {
23277			break
23278		}
23279		v.reset(OpAMD64XORLconstmodify)
23280		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23281		v.Aux = symToAux(sym)
23282		v.AddArg2(base, mem)
23283		return true
23284	}
23285	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
23286	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
23287	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
23288	for {
23289		valoff1 := auxIntToValAndOff(v.AuxInt)
23290		sym1 := auxToSym(v.Aux)
23291		if v_0.Op != OpAMD64LEAQ {
23292			break
23293		}
23294		off2 := auxIntToInt32(v_0.AuxInt)
23295		sym2 := auxToSym(v_0.Aux)
23296		base := v_0.Args[0]
23297		mem := v_1
23298		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23299			break
23300		}
23301		v.reset(OpAMD64XORLconstmodify)
23302		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23303		v.Aux = symToAux(mergeSym(sym1, sym2))
23304		v.AddArg2(base, mem)
23305		return true
23306	}
23307	return false
23308}
23309func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
23310	v_2 := v.Args[2]
23311	v_1 := v.Args[1]
23312	v_0 := v.Args[0]
23313	b := v.Block
23314	typ := &b.Func.Config.Types
23315	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
23316	// cond: is32Bit(int64(off1)+int64(off2))
23317	// result: (XORLload [off1+off2] {sym} val base mem)
23318	for {
23319		off1 := auxIntToInt32(v.AuxInt)
23320		sym := auxToSym(v.Aux)
23321		val := v_0
23322		if v_1.Op != OpAMD64ADDQconst {
23323			break
23324		}
23325		off2 := auxIntToInt32(v_1.AuxInt)
23326		base := v_1.Args[0]
23327		mem := v_2
23328		if !(is32Bit(int64(off1) + int64(off2))) {
23329			break
23330		}
23331		v.reset(OpAMD64XORLload)
23332		v.AuxInt = int32ToAuxInt(off1 + off2)
23333		v.Aux = symToAux(sym)
23334		v.AddArg3(val, base, mem)
23335		return true
23336	}
23337	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
23338	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23339	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
23340	for {
23341		off1 := auxIntToInt32(v.AuxInt)
23342		sym1 := auxToSym(v.Aux)
23343		val := v_0
23344		if v_1.Op != OpAMD64LEAQ {
23345			break
23346		}
23347		off2 := auxIntToInt32(v_1.AuxInt)
23348		sym2 := auxToSym(v_1.Aux)
23349		base := v_1.Args[0]
23350		mem := v_2
23351		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23352			break
23353		}
23354		v.reset(OpAMD64XORLload)
23355		v.AuxInt = int32ToAuxInt(off1 + off2)
23356		v.Aux = symToAux(mergeSym(sym1, sym2))
23357		v.AddArg3(val, base, mem)
23358		return true
23359	}
23360	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
23361	// result: (XORL x (MOVLf2i y))
23362	for {
23363		off := auxIntToInt32(v.AuxInt)
23364		sym := auxToSym(v.Aux)
23365		x := v_0
23366		ptr := v_1
23367		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23368			break
23369		}
23370		y := v_2.Args[1]
23371		if ptr != v_2.Args[0] {
23372			break
23373		}
23374		v.reset(OpAMD64XORL)
23375		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
23376		v0.AddArg(y)
23377		v.AddArg2(x, v0)
23378		return true
23379	}
23380	return false
23381}
23382func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
23383	v_2 := v.Args[2]
23384	v_1 := v.Args[1]
23385	v_0 := v.Args[0]
23386	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
23387	// cond: is32Bit(int64(off1)+int64(off2))
23388	// result: (XORLmodify [off1+off2] {sym} base val mem)
23389	for {
23390		off1 := auxIntToInt32(v.AuxInt)
23391		sym := auxToSym(v.Aux)
23392		if v_0.Op != OpAMD64ADDQconst {
23393			break
23394		}
23395		off2 := auxIntToInt32(v_0.AuxInt)
23396		base := v_0.Args[0]
23397		val := v_1
23398		mem := v_2
23399		if !(is32Bit(int64(off1) + int64(off2))) {
23400			break
23401		}
23402		v.reset(OpAMD64XORLmodify)
23403		v.AuxInt = int32ToAuxInt(off1 + off2)
23404		v.Aux = symToAux(sym)
23405		v.AddArg3(base, val, mem)
23406		return true
23407	}
23408	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23409	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23410	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23411	for {
23412		off1 := auxIntToInt32(v.AuxInt)
23413		sym1 := auxToSym(v.Aux)
23414		if v_0.Op != OpAMD64LEAQ {
23415			break
23416		}
23417		off2 := auxIntToInt32(v_0.AuxInt)
23418		sym2 := auxToSym(v_0.Aux)
23419		base := v_0.Args[0]
23420		val := v_1
23421		mem := v_2
23422		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23423			break
23424		}
23425		v.reset(OpAMD64XORLmodify)
23426		v.AuxInt = int32ToAuxInt(off1 + off2)
23427		v.Aux = symToAux(mergeSym(sym1, sym2))
23428		v.AddArg3(base, val, mem)
23429		return true
23430	}
23431	return false
23432}
23433func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
23434	v_1 := v.Args[1]
23435	v_0 := v.Args[0]
23436	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
23437	// result: (BTCQ x y)
23438	for {
23439		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23440			if v_0.Op != OpAMD64SHLQ {
23441				continue
23442			}
23443			y := v_0.Args[1]
23444			v_0_0 := v_0.Args[0]
23445			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
23446				continue
23447			}
23448			x := v_1
23449			v.reset(OpAMD64BTCQ)
23450			v.AddArg2(x, y)
23451			return true
23452		}
23453		break
23454	}
23455	// match: (XORQ (MOVQconst [c]) x)
23456	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
23457	// result: (BTCQconst [int8(log64(c))] x)
23458	for {
23459		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23460			if v_0.Op != OpAMD64MOVQconst {
23461				continue
23462			}
23463			c := auxIntToInt64(v_0.AuxInt)
23464			x := v_1
23465			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
23466				continue
23467			}
23468			v.reset(OpAMD64BTCQconst)
23469			v.AuxInt = int8ToAuxInt(int8(log64(c)))
23470			v.AddArg(x)
23471			return true
23472		}
23473		break
23474	}
23475	// match: (XORQ x (MOVQconst [c]))
23476	// cond: is32Bit(c)
23477	// result: (XORQconst [int32(c)] x)
23478	for {
23479		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23480			x := v_0
23481			if v_1.Op != OpAMD64MOVQconst {
23482				continue
23483			}
23484			c := auxIntToInt64(v_1.AuxInt)
23485			if !(is32Bit(c)) {
23486				continue
23487			}
23488			v.reset(OpAMD64XORQconst)
23489			v.AuxInt = int32ToAuxInt(int32(c))
23490			v.AddArg(x)
23491			return true
23492		}
23493		break
23494	}
23495	// match: (XORQ x x)
23496	// result: (MOVQconst [0])
23497	for {
23498		x := v_0
23499		if x != v_1 {
23500			break
23501		}
23502		v.reset(OpAMD64MOVQconst)
23503		v.AuxInt = int64ToAuxInt(0)
23504		return true
23505	}
23506	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
23507	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
23508	// result: (XORQload x [off] {sym} ptr mem)
23509	for {
23510		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23511			x := v_0
23512			l := v_1
23513			if l.Op != OpAMD64MOVQload {
23514				continue
23515			}
23516			off := auxIntToInt32(l.AuxInt)
23517			sym := auxToSym(l.Aux)
23518			mem := l.Args[1]
23519			ptr := l.Args[0]
23520			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23521				continue
23522			}
23523			v.reset(OpAMD64XORQload)
23524			v.AuxInt = int32ToAuxInt(off)
23525			v.Aux = symToAux(sym)
23526			v.AddArg3(x, ptr, mem)
23527			return true
23528		}
23529		break
23530	}
23531	// match: (XORQ x (ADDQconst [-1] x))
23532	// cond: buildcfg.GOAMD64 >= 3
23533	// result: (BLSMSKQ x)
23534	for {
23535		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23536			x := v_0
23537			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23538				continue
23539			}
23540			v.reset(OpAMD64BLSMSKQ)
23541			v.AddArg(x)
23542			return true
23543		}
23544		break
23545	}
23546	return false
23547}
23548func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
23549	v_0 := v.Args[0]
23550	// match: (XORQconst [c] (XORQconst [d] x))
23551	// result: (XORQconst [c ^ d] x)
23552	for {
23553		c := auxIntToInt32(v.AuxInt)
23554		if v_0.Op != OpAMD64XORQconst {
23555			break
23556		}
23557		d := auxIntToInt32(v_0.AuxInt)
23558		x := v_0.Args[0]
23559		v.reset(OpAMD64XORQconst)
23560		v.AuxInt = int32ToAuxInt(c ^ d)
23561		v.AddArg(x)
23562		return true
23563	}
23564	// match: (XORQconst [0] x)
23565	// result: x
23566	for {
23567		if auxIntToInt32(v.AuxInt) != 0 {
23568			break
23569		}
23570		x := v_0
23571		v.copyOf(x)
23572		return true
23573	}
23574	// match: (XORQconst [c] (MOVQconst [d]))
23575	// result: (MOVQconst [int64(c)^d])
23576	for {
23577		c := auxIntToInt32(v.AuxInt)
23578		if v_0.Op != OpAMD64MOVQconst {
23579			break
23580		}
23581		d := auxIntToInt64(v_0.AuxInt)
23582		v.reset(OpAMD64MOVQconst)
23583		v.AuxInt = int64ToAuxInt(int64(c) ^ d)
23584		return true
23585	}
23586	return false
23587}
23588func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
23589	v_1 := v.Args[1]
23590	v_0 := v.Args[0]
23591	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
23592	// cond: ValAndOff(valoff1).canAdd32(off2)
23593	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
23594	for {
23595		valoff1 := auxIntToValAndOff(v.AuxInt)
23596		sym := auxToSym(v.Aux)
23597		if v_0.Op != OpAMD64ADDQconst {
23598			break
23599		}
23600		off2 := auxIntToInt32(v_0.AuxInt)
23601		base := v_0.Args[0]
23602		mem := v_1
23603		if !(ValAndOff(valoff1).canAdd32(off2)) {
23604			break
23605		}
23606		v.reset(OpAMD64XORQconstmodify)
23607		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23608		v.Aux = symToAux(sym)
23609		v.AddArg2(base, mem)
23610		return true
23611	}
23612	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
23613	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
23614	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
23615	for {
23616		valoff1 := auxIntToValAndOff(v.AuxInt)
23617		sym1 := auxToSym(v.Aux)
23618		if v_0.Op != OpAMD64LEAQ {
23619			break
23620		}
23621		off2 := auxIntToInt32(v_0.AuxInt)
23622		sym2 := auxToSym(v_0.Aux)
23623		base := v_0.Args[0]
23624		mem := v_1
23625		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23626			break
23627		}
23628		v.reset(OpAMD64XORQconstmodify)
23629		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23630		v.Aux = symToAux(mergeSym(sym1, sym2))
23631		v.AddArg2(base, mem)
23632		return true
23633	}
23634	return false
23635}
23636func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
23637	v_2 := v.Args[2]
23638	v_1 := v.Args[1]
23639	v_0 := v.Args[0]
23640	b := v.Block
23641	typ := &b.Func.Config.Types
23642	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
23643	// cond: is32Bit(int64(off1)+int64(off2))
23644	// result: (XORQload [off1+off2] {sym} val base mem)
23645	for {
23646		off1 := auxIntToInt32(v.AuxInt)
23647		sym := auxToSym(v.Aux)
23648		val := v_0
23649		if v_1.Op != OpAMD64ADDQconst {
23650			break
23651		}
23652		off2 := auxIntToInt32(v_1.AuxInt)
23653		base := v_1.Args[0]
23654		mem := v_2
23655		if !(is32Bit(int64(off1) + int64(off2))) {
23656			break
23657		}
23658		v.reset(OpAMD64XORQload)
23659		v.AuxInt = int32ToAuxInt(off1 + off2)
23660		v.Aux = symToAux(sym)
23661		v.AddArg3(val, base, mem)
23662		return true
23663	}
23664	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
23665	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23666	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
23667	for {
23668		off1 := auxIntToInt32(v.AuxInt)
23669		sym1 := auxToSym(v.Aux)
23670		val := v_0
23671		if v_1.Op != OpAMD64LEAQ {
23672			break
23673		}
23674		off2 := auxIntToInt32(v_1.AuxInt)
23675		sym2 := auxToSym(v_1.Aux)
23676		base := v_1.Args[0]
23677		mem := v_2
23678		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23679			break
23680		}
23681		v.reset(OpAMD64XORQload)
23682		v.AuxInt = int32ToAuxInt(off1 + off2)
23683		v.Aux = symToAux(mergeSym(sym1, sym2))
23684		v.AddArg3(val, base, mem)
23685		return true
23686	}
23687	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
23688	// result: (XORQ x (MOVQf2i y))
23689	for {
23690		off := auxIntToInt32(v.AuxInt)
23691		sym := auxToSym(v.Aux)
23692		x := v_0
23693		ptr := v_1
23694		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23695			break
23696		}
23697		y := v_2.Args[1]
23698		if ptr != v_2.Args[0] {
23699			break
23700		}
23701		v.reset(OpAMD64XORQ)
23702		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
23703		v0.AddArg(y)
23704		v.AddArg2(x, v0)
23705		return true
23706	}
23707	return false
23708}
23709func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
23710	v_2 := v.Args[2]
23711	v_1 := v.Args[1]
23712	v_0 := v.Args[0]
23713	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
23714	// cond: is32Bit(int64(off1)+int64(off2))
23715	// result: (XORQmodify [off1+off2] {sym} base val mem)
23716	for {
23717		off1 := auxIntToInt32(v.AuxInt)
23718		sym := auxToSym(v.Aux)
23719		if v_0.Op != OpAMD64ADDQconst {
23720			break
23721		}
23722		off2 := auxIntToInt32(v_0.AuxInt)
23723		base := v_0.Args[0]
23724		val := v_1
23725		mem := v_2
23726		if !(is32Bit(int64(off1) + int64(off2))) {
23727			break
23728		}
23729		v.reset(OpAMD64XORQmodify)
23730		v.AuxInt = int32ToAuxInt(off1 + off2)
23731		v.Aux = symToAux(sym)
23732		v.AddArg3(base, val, mem)
23733		return true
23734	}
23735	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23736	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23737	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23738	for {
23739		off1 := auxIntToInt32(v.AuxInt)
23740		sym1 := auxToSym(v.Aux)
23741		if v_0.Op != OpAMD64LEAQ {
23742			break
23743		}
23744		off2 := auxIntToInt32(v_0.AuxInt)
23745		sym2 := auxToSym(v_0.Aux)
23746		base := v_0.Args[0]
23747		val := v_1
23748		mem := v_2
23749		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23750			break
23751		}
23752		v.reset(OpAMD64XORQmodify)
23753		v.AuxInt = int32ToAuxInt(off1 + off2)
23754		v.Aux = symToAux(mergeSym(sym1, sym2))
23755		v.AddArg3(base, val, mem)
23756		return true
23757	}
23758	return false
23759}
23760func rewriteValueAMD64_OpAddr(v *Value) bool {
23761	v_0 := v.Args[0]
23762	// match: (Addr {sym} base)
23763	// result: (LEAQ {sym} base)
23764	for {
23765		sym := auxToSym(v.Aux)
23766		base := v_0
23767		v.reset(OpAMD64LEAQ)
23768		v.Aux = symToAux(sym)
23769		v.AddArg(base)
23770		return true
23771	}
23772}
23773func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
23774	v_2 := v.Args[2]
23775	v_1 := v.Args[1]
23776	v_0 := v.Args[0]
23777	b := v.Block
23778	typ := &b.Func.Config.Types
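	// Note: LOCK XADD returns the value memory held before the add, while
	// AtomicAdd must return the value after it. AddTupleFirst32 (and the
	// 64-bit variant below) is a pseudo-op that re-adds val to the first
	// tuple element; later rewrite rules turn Select0 of it into a real
	// ADD and let Select1 pass through to the memory result.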
23779	// match: (AtomicAdd32 ptr val mem)
23780	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
23781	for {
23782		ptr := v_0
23783		val := v_1
23784		mem := v_2
23785		v.reset(OpAMD64AddTupleFirst32)
23786		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
23787		v0.AddArg3(val, ptr, mem)
23788		v.AddArg2(val, v0)
23789		return true
23790	}
23791}
23792func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
23793	v_2 := v.Args[2]
23794	v_1 := v.Args[1]
23795	v_0 := v.Args[0]
23796	b := v.Block
23797	typ := &b.Func.Config.Types
23798	// match: (AtomicAdd64 ptr val mem)
23799	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
23800	for {
23801		ptr := v_0
23802		val := v_1
23803		mem := v_2
23804		v.reset(OpAMD64AddTupleFirst64)
23805		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
23806		v0.AddArg3(val, ptr, mem)
23807		v.AddArg2(val, v0)
23808		return true
23809	}
23810}
23811func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
23812	v_2 := v.Args[2]
23813	v_1 := v.Args[1]
23814	v_0 := v.Args[0]
23815	// match: (AtomicAnd32 ptr val mem)
23816	// result: (ANDLlock ptr val mem)
23817	for {
23818		ptr := v_0
23819		val := v_1
23820		mem := v_2
23821		v.reset(OpAMD64ANDLlock)
23822		v.AddArg3(ptr, val, mem)
23823		return true
23824	}
23825}
23826func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
23827	v_2 := v.Args[2]
23828	v_1 := v.Args[1]
23829	v_0 := v.Args[0]
23830	// match: (AtomicAnd8 ptr val mem)
23831	// result: (ANDBlock ptr val mem)
23832	for {
23833		ptr := v_0
23834		val := v_1
23835		mem := v_2
23836		v.reset(OpAMD64ANDBlock)
23837		v.AddArg3(ptr, val, mem)
23838		return true
23839	}
23840}
23841func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
23842	v_3 := v.Args[3]
23843	v_2 := v.Args[2]
23844	v_1 := v.Args[1]
23845	v_0 := v.Args[0]
23846	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
23847	// result: (CMPXCHGLlock ptr old new_ mem)
23848	for {
23849		ptr := v_0
23850		old := v_1
23851		new_ := v_2
23852		mem := v_3
23853		v.reset(OpAMD64CMPXCHGLlock)
23854		v.AddArg4(ptr, old, new_, mem)
23855		return true
23856	}
23857}
23858func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
23859	v_3 := v.Args[3]
23860	v_2 := v.Args[2]
23861	v_1 := v.Args[1]
23862	v_0 := v.Args[0]
23863	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
23864	// result: (CMPXCHGQlock ptr old new_ mem)
23865	for {
23866		ptr := v_0
23867		old := v_1
23868		new_ := v_2
23869		mem := v_3
23870		v.reset(OpAMD64CMPXCHGQlock)
23871		v.AddArg4(ptr, old, new_, mem)
23872		return true
23873	}
23874}
23875func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
23876	v_2 := v.Args[2]
23877	v_1 := v.Args[1]
23878	v_0 := v.Args[0]
23879	// match: (AtomicExchange32 ptr val mem)
23880	// result: (XCHGL val ptr mem)
23881	for {
23882		ptr := v_0
23883		val := v_1
23884		mem := v_2
23885		v.reset(OpAMD64XCHGL)
23886		v.AddArg3(val, ptr, mem)
23887		return true
23888	}
23889}
23890func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
23891	v_2 := v.Args[2]
23892	v_1 := v.Args[1]
23893	v_0 := v.Args[0]
23894	// match: (AtomicExchange64 ptr val mem)
23895	// result: (XCHGQ val ptr mem)
23896	for {
23897		ptr := v_0
23898		val := v_1
23899		mem := v_2
23900		v.reset(OpAMD64XCHGQ)
23901		v.AddArg3(val, ptr, mem)
23902		return true
23903	}
23904}
23905func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
23906	v_1 := v.Args[1]
23907	v_0 := v.Args[0]
23908	// match: (AtomicLoad32 ptr mem)
23909	// result: (MOVLatomicload ptr mem)
23910	for {
23911		ptr := v_0
23912		mem := v_1
23913		v.reset(OpAMD64MOVLatomicload)
23914		v.AddArg2(ptr, mem)
23915		return true
23916	}
23917}
23918func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
23919	v_1 := v.Args[1]
23920	v_0 := v.Args[0]
23921	// match: (AtomicLoad64 ptr mem)
23922	// result: (MOVQatomicload ptr mem)
23923	for {
23924		ptr := v_0
23925		mem := v_1
23926		v.reset(OpAMD64MOVQatomicload)
23927		v.AddArg2(ptr, mem)
23928		return true
23929	}
23930}
23931func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
23932	v_1 := v.Args[1]
23933	v_0 := v.Args[0]
23934	// match: (AtomicLoad8 ptr mem)
23935	// result: (MOVBatomicload ptr mem)
23936	for {
23937		ptr := v_0
23938		mem := v_1
23939		v.reset(OpAMD64MOVBatomicload)
23940		v.AddArg2(ptr, mem)
23941		return true
23942	}
23943}
23944func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
23945	v_1 := v.Args[1]
23946	v_0 := v.Args[0]
23947	// match: (AtomicLoadPtr ptr mem)
23948	// result: (MOVQatomicload ptr mem)
23949	for {
23950		ptr := v_0
23951		mem := v_1
23952		v.reset(OpAMD64MOVQatomicload)
23953		v.AddArg2(ptr, mem)
23954		return true
23955	}
23956}
23957func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
23958	v_2 := v.Args[2]
23959	v_1 := v.Args[1]
23960	v_0 := v.Args[0]
23961	// match: (AtomicOr32 ptr val mem)
23962	// result: (ORLlock ptr val mem)
23963	for {
23964		ptr := v_0
23965		val := v_1
23966		mem := v_2
23967		v.reset(OpAMD64ORLlock)
23968		v.AddArg3(ptr, val, mem)
23969		return true
23970	}
23971}
23972func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
23973	v_2 := v.Args[2]
23974	v_1 := v.Args[1]
23975	v_0 := v.Args[0]
23976	// match: (AtomicOr8 ptr val mem)
23977	// result: (ORBlock ptr val mem)
23978	for {
23979		ptr := v_0
23980		val := v_1
23981		mem := v_2
23982		v.reset(OpAMD64ORBlock)
23983		v.AddArg3(ptr, val, mem)
23984		return true
23985	}
23986}
23987func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
23988	v_2 := v.Args[2]
23989	v_1 := v.Args[1]
23990	v_0 := v.Args[0]
23991	b := v.Block
23992	typ := &b.Func.Config.Types
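	// Note: a plain MOV store is not a sequentially consistent store on
	// x86, so AtomicStore{8,32,64,PtrNoWB} are lowered to XCHG, whose
	// implicit LOCK provides the required ordering; Select1 keeps only the
	// memory result and the exchanged-out old value is discarded.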
23993	// match: (AtomicStore32 ptr val mem)
23994	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
23995	for {
23996		ptr := v_0
23997		val := v_1
23998		mem := v_2
23999		v.reset(OpSelect1)
24000		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
24001		v0.AddArg3(val, ptr, mem)
24002		v.AddArg(v0)
24003		return true
24004	}
24005}
24006func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
24007	v_2 := v.Args[2]
24008	v_1 := v.Args[1]
24009	v_0 := v.Args[0]
24010	b := v.Block
24011	typ := &b.Func.Config.Types
24012	// match: (AtomicStore64 ptr val mem)
24013	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
24014	for {
24015		ptr := v_0
24016		val := v_1
24017		mem := v_2
24018		v.reset(OpSelect1)
24019		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
24020		v0.AddArg3(val, ptr, mem)
24021		v.AddArg(v0)
24022		return true
24023	}
24024}
24025func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
24026	v_2 := v.Args[2]
24027	v_1 := v.Args[1]
24028	v_0 := v.Args[0]
24029	b := v.Block
24030	typ := &b.Func.Config.Types
24031	// match: (AtomicStore8 ptr val mem)
24032	// result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
24033	for {
24034		ptr := v_0
24035		val := v_1
24036		mem := v_2
24037		v.reset(OpSelect1)
24038		v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
24039		v0.AddArg3(val, ptr, mem)
24040		v.AddArg(v0)
24041		return true
24042	}
24043}
24044func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
24045	v_2 := v.Args[2]
24046	v_1 := v.Args[1]
24047	v_0 := v.Args[0]
24048	b := v.Block
24049	typ := &b.Func.Config.Types
24050	// match: (AtomicStorePtrNoWB ptr val mem)
24051	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
24052	for {
24053		ptr := v_0
24054		val := v_1
24055		mem := v_2
24056		v.reset(OpSelect1)
24057		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
24058		v0.AddArg3(val, ptr, mem)
24059		v.AddArg(v0)
24060		return true
24061	}
24062}
24063func rewriteValueAMD64_OpBitLen16(v *Value) bool {
24064	v_0 := v.Args[0]
24065	b := v.Block
24066	typ := &b.Func.Config.Types
24067	// match: (BitLen16 x)
24068	// cond: buildcfg.GOAMD64 < 3
24069	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
24070	for {
24071		x := v_0
24072		if !(buildcfg.GOAMD64 < 3) {
24073			break
24074		}
24075		v.reset(OpAMD64BSRL)
24076		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24077		v0.AuxInt = int32ToAuxInt(1)
24078		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
24079		v1.AddArg(x)
24080		v0.AddArg2(v1, v1)
24081		v.AddArg(v0)
24082		return true
24083	}
24084	// match: (BitLen16 <t> x)
24085	// cond: buildcfg.GOAMD64 >= 3
24086	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
24087	for {
24088		t := v.Type
24089		x := v_0
24090		if !(buildcfg.GOAMD64 >= 3) {
24091			break
24092		}
24093		v.reset(OpAMD64NEGQ)
24094		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24095		v0.AuxInt = int32ToAuxInt(-32)
24096		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24097		v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
24098		v2.AddArg(x)
24099		v1.AddArg(v2)
24100		v0.AddArg(v1)
24101		v.AddArg(v0)
24102		return true
24103	}
24104	return false
24105}
24106func rewriteValueAMD64_OpBitLen32(v *Value) bool {
24107	v_0 := v.Args[0]
24108	b := v.Block
24109	typ := &b.Func.Config.Types
24110	// match: (BitLen32 x)
24111	// cond: buildcfg.GOAMD64 < 3
24112	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
24113	for {
24114		x := v_0
24115		if !(buildcfg.GOAMD64 < 3) {
24116			break
24117		}
24118		v.reset(OpSelect0)
24119		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24120		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
24121		v1.AuxInt = int32ToAuxInt(1)
24122		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
24123		v2.AddArg(x)
24124		v1.AddArg2(v2, v2)
24125		v0.AddArg(v1)
24126		v.AddArg(v0)
24127		return true
24128	}
24129	// match: (BitLen32 <t> x)
24130	// cond: buildcfg.GOAMD64 >= 3
24131	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
24132	for {
24133		t := v.Type
24134		x := v_0
24135		if !(buildcfg.GOAMD64 >= 3) {
24136			break
24137		}
24138		v.reset(OpAMD64NEGQ)
24139		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24140		v0.AuxInt = int32ToAuxInt(-32)
24141		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24142		v1.AddArg(x)
24143		v0.AddArg(v1)
24144		v.AddArg(v0)
24145		return true
24146	}
24147	return false
24148}
24149func rewriteValueAMD64_OpBitLen64(v *Value) bool {
24150	v_0 := v.Args[0]
24151	b := v.Block
24152	typ := &b.Func.Config.Types
24153	// match: (BitLen64 <t> x)
24154	// cond: buildcfg.GOAMD64 < 3
24155	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
24156	for {
24157		t := v.Type
24158		x := v_0
24159		if !(buildcfg.GOAMD64 < 3) {
24160			break
24161		}
24162		v.reset(OpAMD64ADDQconst)
24163		v.AuxInt = int32ToAuxInt(1)
24164		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
24165		v1 := b.NewValue0(v.Pos, OpSelect0, t)
24166		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24167		v2.AddArg(x)
24168		v1.AddArg(v2)
24169		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
24170		v3.AuxInt = int64ToAuxInt(-1)
24171		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
24172		v4.AddArg(v2)
24173		v0.AddArg3(v1, v3, v4)
24174		v.AddArg(v0)
24175		return true
24176	}
24177	// match: (BitLen64 <t> x)
24178	// cond: buildcfg.GOAMD64 >= 3
24179	// result: (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
24180	for {
24181		t := v.Type
24182		x := v_0
24183		if !(buildcfg.GOAMD64 >= 3) {
24184			break
24185		}
24186		v.reset(OpAMD64NEGQ)
24187		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24188		v0.AuxInt = int32ToAuxInt(-64)
24189		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
24190		v1.AddArg(x)
24191		v0.AddArg(v1)
24192		v.AddArg(v0)
24193		return true
24194	}
24195	return false
24196}
24197func rewriteValueAMD64_OpBitLen8(v *Value) bool {
24198	v_0 := v.Args[0]
24199	b := v.Block
24200	typ := &b.Func.Config.Types
24201	// match: (BitLen8 x)
24202	// cond: buildcfg.GOAMD64 < 3
24203	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
24204	for {
24205		x := v_0
24206		if !(buildcfg.GOAMD64 < 3) {
24207			break
24208		}
24209		v.reset(OpAMD64BSRL)
24210		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24211		v0.AuxInt = int32ToAuxInt(1)
24212		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
24213		v1.AddArg(x)
24214		v0.AddArg2(v1, v1)
24215		v.AddArg(v0)
24216		return true
24217	}
24218	// match: (BitLen8 <t> x)
24219	// cond: buildcfg.GOAMD64 >= 3
24220	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
24221	for {
24222		t := v.Type
24223		x := v_0
24224		if !(buildcfg.GOAMD64 >= 3) {
24225			break
24226		}
24227		v.reset(OpAMD64NEGQ)
24228		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24229		v0.AuxInt = int32ToAuxInt(-32)
24230		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24231		v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
24232		v2.AddArg(x)
24233		v1.AddArg(v2)
24234		v0.AddArg(v1)
24235		v.AddArg(v0)
24236		return true
24237	}
24238	return false
24239}
24240func rewriteValueAMD64_OpBswap16(v *Value) bool {
24241	v_0 := v.Args[0]
24242	// match: (Bswap16 x)
24243	// result: (ROLWconst [8] x)
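	// (a 16-bit byte swap is a rotate by 8; BSWAP itself only operates on
	// 32- and 64-bit registers)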
24244	for {
24245		x := v_0
24246		v.reset(OpAMD64ROLWconst)
24247		v.AuxInt = int8ToAuxInt(8)
24248		v.AddArg(x)
24249		return true
24250	}
24251}
24252func rewriteValueAMD64_OpCeil(v *Value) bool {
24253	v_0 := v.Args[0]
24254	// match: (Ceil x)
24255	// result: (ROUNDSD [2] x)
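	// (the AuxInt is ROUNDSD's SSE4.1 rounding-mode immediate; mode 2
	// rounds toward +Inf, which is exactly Ceil)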
24256	for {
24257		x := v_0
24258		v.reset(OpAMD64ROUNDSD)
24259		v.AuxInt = int8ToAuxInt(2)
24260		v.AddArg(x)
24261		return true
24262	}
24263}
24264func rewriteValueAMD64_OpCondSelect(v *Value) bool {
24265	v_2 := v.Args[2]
24266	v_1 := v.Args[1]
24267	v_0 := v.Args[0]
24268	b := v.Block
24269	typ := &b.Func.Config.Types
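	// Note: each rule below selects a CMOV variant by result width (Q=64,
	// L=32, W=16) and by the SETcc producing the condition. x and y swap
	// places in the result because CMOVcc returns its second argument when
	// the condition holds and its first otherwise, whereas CondSelect
	// yields x when the condition is true.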
24270	// match: (CondSelect <t> x y (SETEQ cond))
24271	// cond: (is64BitInt(t) || isPtr(t))
24272	// result: (CMOVQEQ y x cond)
24273	for {
24274		t := v.Type
24275		x := v_0
24276		y := v_1
24277		if v_2.Op != OpAMD64SETEQ {
24278			break
24279		}
24280		cond := v_2.Args[0]
24281		if !(is64BitInt(t) || isPtr(t)) {
24282			break
24283		}
24284		v.reset(OpAMD64CMOVQEQ)
24285		v.AddArg3(y, x, cond)
24286		return true
24287	}
24288	// match: (CondSelect <t> x y (SETNE cond))
24289	// cond: (is64BitInt(t) || isPtr(t))
24290	// result: (CMOVQNE y x cond)
24291	for {
24292		t := v.Type
24293		x := v_0
24294		y := v_1
24295		if v_2.Op != OpAMD64SETNE {
24296			break
24297		}
24298		cond := v_2.Args[0]
24299		if !(is64BitInt(t) || isPtr(t)) {
24300			break
24301		}
24302		v.reset(OpAMD64CMOVQNE)
24303		v.AddArg3(y, x, cond)
24304		return true
24305	}
24306	// match: (CondSelect <t> x y (SETL cond))
24307	// cond: (is64BitInt(t) || isPtr(t))
24308	// result: (CMOVQLT y x cond)
24309	for {
24310		t := v.Type
24311		x := v_0
24312		y := v_1
24313		if v_2.Op != OpAMD64SETL {
24314			break
24315		}
24316		cond := v_2.Args[0]
24317		if !(is64BitInt(t) || isPtr(t)) {
24318			break
24319		}
24320		v.reset(OpAMD64CMOVQLT)
24321		v.AddArg3(y, x, cond)
24322		return true
24323	}
24324	// match: (CondSelect <t> x y (SETG cond))
24325	// cond: (is64BitInt(t) || isPtr(t))
24326	// result: (CMOVQGT y x cond)
24327	for {
24328		t := v.Type
24329		x := v_0
24330		y := v_1
24331		if v_2.Op != OpAMD64SETG {
24332			break
24333		}
24334		cond := v_2.Args[0]
24335		if !(is64BitInt(t) || isPtr(t)) {
24336			break
24337		}
24338		v.reset(OpAMD64CMOVQGT)
24339		v.AddArg3(y, x, cond)
24340		return true
24341	}
24342	// match: (CondSelect <t> x y (SETLE cond))
24343	// cond: (is64BitInt(t) || isPtr(t))
24344	// result: (CMOVQLE y x cond)
24345	for {
24346		t := v.Type
24347		x := v_0
24348		y := v_1
24349		if v_2.Op != OpAMD64SETLE {
24350			break
24351		}
24352		cond := v_2.Args[0]
24353		if !(is64BitInt(t) || isPtr(t)) {
24354			break
24355		}
24356		v.reset(OpAMD64CMOVQLE)
24357		v.AddArg3(y, x, cond)
24358		return true
24359	}
24360	// match: (CondSelect <t> x y (SETGE cond))
24361	// cond: (is64BitInt(t) || isPtr(t))
24362	// result: (CMOVQGE y x cond)
24363	for {
24364		t := v.Type
24365		x := v_0
24366		y := v_1
24367		if v_2.Op != OpAMD64SETGE {
24368			break
24369		}
24370		cond := v_2.Args[0]
24371		if !(is64BitInt(t) || isPtr(t)) {
24372			break
24373		}
24374		v.reset(OpAMD64CMOVQGE)
24375		v.AddArg3(y, x, cond)
24376		return true
24377	}
24378	// match: (CondSelect <t> x y (SETA cond))
24379	// cond: (is64BitInt(t) || isPtr(t))
24380	// result: (CMOVQHI y x cond)
24381	for {
24382		t := v.Type
24383		x := v_0
24384		y := v_1
24385		if v_2.Op != OpAMD64SETA {
24386			break
24387		}
24388		cond := v_2.Args[0]
24389		if !(is64BitInt(t) || isPtr(t)) {
24390			break
24391		}
24392		v.reset(OpAMD64CMOVQHI)
24393		v.AddArg3(y, x, cond)
24394		return true
24395	}
24396	// match: (CondSelect <t> x y (SETB cond))
24397	// cond: (is64BitInt(t) || isPtr(t))
24398	// result: (CMOVQCS y x cond)
24399	for {
24400		t := v.Type
24401		x := v_0
24402		y := v_1
24403		if v_2.Op != OpAMD64SETB {
24404			break
24405		}
24406		cond := v_2.Args[0]
24407		if !(is64BitInt(t) || isPtr(t)) {
24408			break
24409		}
24410		v.reset(OpAMD64CMOVQCS)
24411		v.AddArg3(y, x, cond)
24412		return true
24413	}
24414	// match: (CondSelect <t> x y (SETAE cond))
24415	// cond: (is64BitInt(t) || isPtr(t))
24416	// result: (CMOVQCC y x cond)
24417	for {
24418		t := v.Type
24419		x := v_0
24420		y := v_1
24421		if v_2.Op != OpAMD64SETAE {
24422			break
24423		}
24424		cond := v_2.Args[0]
24425		if !(is64BitInt(t) || isPtr(t)) {
24426			break
24427		}
24428		v.reset(OpAMD64CMOVQCC)
24429		v.AddArg3(y, x, cond)
24430		return true
24431	}
24432	// match: (CondSelect <t> x y (SETBE cond))
24433	// cond: (is64BitInt(t) || isPtr(t))
24434	// result: (CMOVQLS y x cond)
24435	for {
24436		t := v.Type
24437		x := v_0
24438		y := v_1
24439		if v_2.Op != OpAMD64SETBE {
24440			break
24441		}
24442		cond := v_2.Args[0]
24443		if !(is64BitInt(t) || isPtr(t)) {
24444			break
24445		}
24446		v.reset(OpAMD64CMOVQLS)
24447		v.AddArg3(y, x, cond)
24448		return true
24449	}
24450	// match: (CondSelect <t> x y (SETEQF cond))
24451	// cond: (is64BitInt(t) || isPtr(t))
24452	// result: (CMOVQEQF y x cond)
24453	for {
24454		t := v.Type
24455		x := v_0
24456		y := v_1
24457		if v_2.Op != OpAMD64SETEQF {
24458			break
24459		}
24460		cond := v_2.Args[0]
24461		if !(is64BitInt(t) || isPtr(t)) {
24462			break
24463		}
24464		v.reset(OpAMD64CMOVQEQF)
24465		v.AddArg3(y, x, cond)
24466		return true
24467	}
24468	// match: (CondSelect <t> x y (SETNEF cond))
24469	// cond: (is64BitInt(t) || isPtr(t))
24470	// result: (CMOVQNEF y x cond)
24471	for {
24472		t := v.Type
24473		x := v_0
24474		y := v_1
24475		if v_2.Op != OpAMD64SETNEF {
24476			break
24477		}
24478		cond := v_2.Args[0]
24479		if !(is64BitInt(t) || isPtr(t)) {
24480			break
24481		}
24482		v.reset(OpAMD64CMOVQNEF)
24483		v.AddArg3(y, x, cond)
24484		return true
24485	}
24486	// match: (CondSelect <t> x y (SETGF cond))
24487	// cond: (is64BitInt(t) || isPtr(t))
24488	// result: (CMOVQGTF y x cond)
24489	for {
24490		t := v.Type
24491		x := v_0
24492		y := v_1
24493		if v_2.Op != OpAMD64SETGF {
24494			break
24495		}
24496		cond := v_2.Args[0]
24497		if !(is64BitInt(t) || isPtr(t)) {
24498			break
24499		}
24500		v.reset(OpAMD64CMOVQGTF)
24501		v.AddArg3(y, x, cond)
24502		return true
24503	}
24504	// match: (CondSelect <t> x y (SETGEF cond))
24505	// cond: (is64BitInt(t) || isPtr(t))
24506	// result: (CMOVQGEF y x cond)
24507	for {
24508		t := v.Type
24509		x := v_0
24510		y := v_1
24511		if v_2.Op != OpAMD64SETGEF {
24512			break
24513		}
24514		cond := v_2.Args[0]
24515		if !(is64BitInt(t) || isPtr(t)) {
24516			break
24517		}
24518		v.reset(OpAMD64CMOVQGEF)
24519		v.AddArg3(y, x, cond)
24520		return true
24521	}
24522	// match: (CondSelect <t> x y (SETEQ cond))
24523	// cond: is32BitInt(t)
24524	// result: (CMOVLEQ y x cond)
24525	for {
24526		t := v.Type
24527		x := v_0
24528		y := v_1
24529		if v_2.Op != OpAMD64SETEQ {
24530			break
24531		}
24532		cond := v_2.Args[0]
24533		if !(is32BitInt(t)) {
24534			break
24535		}
24536		v.reset(OpAMD64CMOVLEQ)
24537		v.AddArg3(y, x, cond)
24538		return true
24539	}
24540	// match: (CondSelect <t> x y (SETNE cond))
24541	// cond: is32BitInt(t)
24542	// result: (CMOVLNE y x cond)
24543	for {
24544		t := v.Type
24545		x := v_0
24546		y := v_1
24547		if v_2.Op != OpAMD64SETNE {
24548			break
24549		}
24550		cond := v_2.Args[0]
24551		if !(is32BitInt(t)) {
24552			break
24553		}
24554		v.reset(OpAMD64CMOVLNE)
24555		v.AddArg3(y, x, cond)
24556		return true
24557	}
24558	// match: (CondSelect <t> x y (SETL cond))
24559	// cond: is32BitInt(t)
24560	// result: (CMOVLLT y x cond)
24561	for {
24562		t := v.Type
24563		x := v_0
24564		y := v_1
24565		if v_2.Op != OpAMD64SETL {
24566			break
24567		}
24568		cond := v_2.Args[0]
24569		if !(is32BitInt(t)) {
24570			break
24571		}
24572		v.reset(OpAMD64CMOVLLT)
24573		v.AddArg3(y, x, cond)
24574		return true
24575	}
24576	// match: (CondSelect <t> x y (SETG cond))
24577	// cond: is32BitInt(t)
24578	// result: (CMOVLGT y x cond)
24579	for {
24580		t := v.Type
24581		x := v_0
24582		y := v_1
24583		if v_2.Op != OpAMD64SETG {
24584			break
24585		}
24586		cond := v_2.Args[0]
24587		if !(is32BitInt(t)) {
24588			break
24589		}
24590		v.reset(OpAMD64CMOVLGT)
24591		v.AddArg3(y, x, cond)
24592		return true
24593	}
24594	// match: (CondSelect <t> x y (SETLE cond))
24595	// cond: is32BitInt(t)
24596	// result: (CMOVLLE y x cond)
24597	for {
24598		t := v.Type
24599		x := v_0
24600		y := v_1
24601		if v_2.Op != OpAMD64SETLE {
24602			break
24603		}
24604		cond := v_2.Args[0]
24605		if !(is32BitInt(t)) {
24606			break
24607		}
24608		v.reset(OpAMD64CMOVLLE)
24609		v.AddArg3(y, x, cond)
24610		return true
24611	}
24612	// match: (CondSelect <t> x y (SETGE cond))
24613	// cond: is32BitInt(t)
24614	// result: (CMOVLGE y x cond)
24615	for {
24616		t := v.Type
24617		x := v_0
24618		y := v_1
24619		if v_2.Op != OpAMD64SETGE {
24620			break
24621		}
24622		cond := v_2.Args[0]
24623		if !(is32BitInt(t)) {
24624			break
24625		}
24626		v.reset(OpAMD64CMOVLGE)
24627		v.AddArg3(y, x, cond)
24628		return true
24629	}
24630	// match: (CondSelect <t> x y (SETA cond))
24631	// cond: is32BitInt(t)
24632	// result: (CMOVLHI y x cond)
24633	for {
24634		t := v.Type
24635		x := v_0
24636		y := v_1
24637		if v_2.Op != OpAMD64SETA {
24638			break
24639		}
24640		cond := v_2.Args[0]
24641		if !(is32BitInt(t)) {
24642			break
24643		}
24644		v.reset(OpAMD64CMOVLHI)
24645		v.AddArg3(y, x, cond)
24646		return true
24647	}
24648	// match: (CondSelect <t> x y (SETB cond))
24649	// cond: is32BitInt(t)
24650	// result: (CMOVLCS y x cond)
24651	for {
24652		t := v.Type
24653		x := v_0
24654		y := v_1
24655		if v_2.Op != OpAMD64SETB {
24656			break
24657		}
24658		cond := v_2.Args[0]
24659		if !(is32BitInt(t)) {
24660			break
24661		}
24662		v.reset(OpAMD64CMOVLCS)
24663		v.AddArg3(y, x, cond)
24664		return true
24665	}
24666	// match: (CondSelect <t> x y (SETAE cond))
24667	// cond: is32BitInt(t)
24668	// result: (CMOVLCC y x cond)
24669	for {
24670		t := v.Type
24671		x := v_0
24672		y := v_1
24673		if v_2.Op != OpAMD64SETAE {
24674			break
24675		}
24676		cond := v_2.Args[0]
24677		if !(is32BitInt(t)) {
24678			break
24679		}
24680		v.reset(OpAMD64CMOVLCC)
24681		v.AddArg3(y, x, cond)
24682		return true
24683	}
24684	// match: (CondSelect <t> x y (SETBE cond))
24685	// cond: is32BitInt(t)
24686	// result: (CMOVLLS y x cond)
24687	for {
24688		t := v.Type
24689		x := v_0
24690		y := v_1
24691		if v_2.Op != OpAMD64SETBE {
24692			break
24693		}
24694		cond := v_2.Args[0]
24695		if !(is32BitInt(t)) {
24696			break
24697		}
24698		v.reset(OpAMD64CMOVLLS)
24699		v.AddArg3(y, x, cond)
24700		return true
24701	}
24702	// match: (CondSelect <t> x y (SETEQF cond))
24703	// cond: is32BitInt(t)
24704	// result: (CMOVLEQF y x cond)
24705	for {
24706		t := v.Type
24707		x := v_0
24708		y := v_1
24709		if v_2.Op != OpAMD64SETEQF {
24710			break
24711		}
24712		cond := v_2.Args[0]
24713		if !(is32BitInt(t)) {
24714			break
24715		}
24716		v.reset(OpAMD64CMOVLEQF)
24717		v.AddArg3(y, x, cond)
24718		return true
24719	}
24720	// match: (CondSelect <t> x y (SETNEF cond))
24721	// cond: is32BitInt(t)
24722	// result: (CMOVLNEF y x cond)
24723	for {
24724		t := v.Type
24725		x := v_0
24726		y := v_1
24727		if v_2.Op != OpAMD64SETNEF {
24728			break
24729		}
24730		cond := v_2.Args[0]
24731		if !(is32BitInt(t)) {
24732			break
24733		}
24734		v.reset(OpAMD64CMOVLNEF)
24735		v.AddArg3(y, x, cond)
24736		return true
24737	}
24738	// match: (CondSelect <t> x y (SETGF cond))
24739	// cond: is32BitInt(t)
24740	// result: (CMOVLGTF y x cond)
24741	for {
24742		t := v.Type
24743		x := v_0
24744		y := v_1
24745		if v_2.Op != OpAMD64SETGF {
24746			break
24747		}
24748		cond := v_2.Args[0]
24749		if !(is32BitInt(t)) {
24750			break
24751		}
24752		v.reset(OpAMD64CMOVLGTF)
24753		v.AddArg3(y, x, cond)
24754		return true
24755	}
24756	// match: (CondSelect <t> x y (SETGEF cond))
24757	// cond: is32BitInt(t)
24758	// result: (CMOVLGEF y x cond)
24759	for {
24760		t := v.Type
24761		x := v_0
24762		y := v_1
24763		if v_2.Op != OpAMD64SETGEF {
24764			break
24765		}
24766		cond := v_2.Args[0]
24767		if !(is32BitInt(t)) {
24768			break
24769		}
24770		v.reset(OpAMD64CMOVLGEF)
24771		v.AddArg3(y, x, cond)
24772		return true
24773	}
24774	// match: (CondSelect <t> x y (SETEQ cond))
24775	// cond: is16BitInt(t)
24776	// result: (CMOVWEQ y x cond)
24777	for {
24778		t := v.Type
24779		x := v_0
24780		y := v_1
24781		if v_2.Op != OpAMD64SETEQ {
24782			break
24783		}
24784		cond := v_2.Args[0]
24785		if !(is16BitInt(t)) {
24786			break
24787		}
24788		v.reset(OpAMD64CMOVWEQ)
24789		v.AddArg3(y, x, cond)
24790		return true
24791	}
24792	// match: (CondSelect <t> x y (SETNE cond))
24793	// cond: is16BitInt(t)
24794	// result: (CMOVWNE y x cond)
24795	for {
24796		t := v.Type
24797		x := v_0
24798		y := v_1
24799		if v_2.Op != OpAMD64SETNE {
24800			break
24801		}
24802		cond := v_2.Args[0]
24803		if !(is16BitInt(t)) {
24804			break
24805		}
24806		v.reset(OpAMD64CMOVWNE)
24807		v.AddArg3(y, x, cond)
24808		return true
24809	}
24810	// match: (CondSelect <t> x y (SETL cond))
24811	// cond: is16BitInt(t)
24812	// result: (CMOVWLT y x cond)
24813	for {
24814		t := v.Type
24815		x := v_0
24816		y := v_1
24817		if v_2.Op != OpAMD64SETL {
24818			break
24819		}
24820		cond := v_2.Args[0]
24821		if !(is16BitInt(t)) {
24822			break
24823		}
24824		v.reset(OpAMD64CMOVWLT)
24825		v.AddArg3(y, x, cond)
24826		return true
24827	}
24828	// match: (CondSelect <t> x y (SETG cond))
24829	// cond: is16BitInt(t)
24830	// result: (CMOVWGT y x cond)
24831	for {
24832		t := v.Type
24833		x := v_0
24834		y := v_1
24835		if v_2.Op != OpAMD64SETG {
24836			break
24837		}
24838		cond := v_2.Args[0]
24839		if !(is16BitInt(t)) {
24840			break
24841		}
24842		v.reset(OpAMD64CMOVWGT)
24843		v.AddArg3(y, x, cond)
24844		return true
24845	}
24846	// match: (CondSelect <t> x y (SETLE cond))
24847	// cond: is16BitInt(t)
24848	// result: (CMOVWLE y x cond)
24849	for {
24850		t := v.Type
24851		x := v_0
24852		y := v_1
24853		if v_2.Op != OpAMD64SETLE {
24854			break
24855		}
24856		cond := v_2.Args[0]
24857		if !(is16BitInt(t)) {
24858			break
24859		}
24860		v.reset(OpAMD64CMOVWLE)
24861		v.AddArg3(y, x, cond)
24862		return true
24863	}
24864	// match: (CondSelect <t> x y (SETGE cond))
24865	// cond: is16BitInt(t)
24866	// result: (CMOVWGE y x cond)
24867	for {
24868		t := v.Type
24869		x := v_0
24870		y := v_1
24871		if v_2.Op != OpAMD64SETGE {
24872			break
24873		}
24874		cond := v_2.Args[0]
24875		if !(is16BitInt(t)) {
24876			break
24877		}
24878		v.reset(OpAMD64CMOVWGE)
24879		v.AddArg3(y, x, cond)
24880		return true
24881	}
24882	// match: (CondSelect <t> x y (SETA cond))
24883	// cond: is16BitInt(t)
24884	// result: (CMOVWHI y x cond)
24885	for {
24886		t := v.Type
24887		x := v_0
24888		y := v_1
24889		if v_2.Op != OpAMD64SETA {
24890			break
24891		}
24892		cond := v_2.Args[0]
24893		if !(is16BitInt(t)) {
24894			break
24895		}
24896		v.reset(OpAMD64CMOVWHI)
24897		v.AddArg3(y, x, cond)
24898		return true
24899	}
24900	// match: (CondSelect <t> x y (SETB cond))
24901	// cond: is16BitInt(t)
24902	// result: (CMOVWCS y x cond)
24903	for {
24904		t := v.Type
24905		x := v_0
24906		y := v_1
24907		if v_2.Op != OpAMD64SETB {
24908			break
24909		}
24910		cond := v_2.Args[0]
24911		if !(is16BitInt(t)) {
24912			break
24913		}
24914		v.reset(OpAMD64CMOVWCS)
24915		v.AddArg3(y, x, cond)
24916		return true
24917	}
24918	// match: (CondSelect <t> x y (SETAE cond))
24919	// cond: is16BitInt(t)
24920	// result: (CMOVWCC y x cond)
24921	for {
24922		t := v.Type
24923		x := v_0
24924		y := v_1
24925		if v_2.Op != OpAMD64SETAE {
24926			break
24927		}
24928		cond := v_2.Args[0]
24929		if !(is16BitInt(t)) {
24930			break
24931		}
24932		v.reset(OpAMD64CMOVWCC)
24933		v.AddArg3(y, x, cond)
24934		return true
24935	}
24936	// match: (CondSelect <t> x y (SETBE cond))
24937	// cond: is16BitInt(t)
24938	// result: (CMOVWLS y x cond)
24939	for {
24940		t := v.Type
24941		x := v_0
24942		y := v_1
24943		if v_2.Op != OpAMD64SETBE {
24944			break
24945		}
24946		cond := v_2.Args[0]
24947		if !(is16BitInt(t)) {
24948			break
24949		}
24950		v.reset(OpAMD64CMOVWLS)
24951		v.AddArg3(y, x, cond)
24952		return true
24953	}
24954	// match: (CondSelect <t> x y (SETEQF cond))
24955	// cond: is16BitInt(t)
24956	// result: (CMOVWEQF y x cond)
24957	for {
24958		t := v.Type
24959		x := v_0
24960		y := v_1
24961		if v_2.Op != OpAMD64SETEQF {
24962			break
24963		}
24964		cond := v_2.Args[0]
24965		if !(is16BitInt(t)) {
24966			break
24967		}
24968		v.reset(OpAMD64CMOVWEQF)
24969		v.AddArg3(y, x, cond)
24970		return true
24971	}
24972	// match: (CondSelect <t> x y (SETNEF cond))
24973	// cond: is16BitInt(t)
24974	// result: (CMOVWNEF y x cond)
24975	for {
24976		t := v.Type
24977		x := v_0
24978		y := v_1
24979		if v_2.Op != OpAMD64SETNEF {
24980			break
24981		}
24982		cond := v_2.Args[0]
24983		if !(is16BitInt(t)) {
24984			break
24985		}
24986		v.reset(OpAMD64CMOVWNEF)
24987		v.AddArg3(y, x, cond)
24988		return true
24989	}
24990	// match: (CondSelect <t> x y (SETGF cond))
24991	// cond: is16BitInt(t)
24992	// result: (CMOVWGTF y x cond)
24993	for {
24994		t := v.Type
24995		x := v_0
24996		y := v_1
24997		if v_2.Op != OpAMD64SETGF {
24998			break
24999		}
25000		cond := v_2.Args[0]
25001		if !(is16BitInt(t)) {
25002			break
25003		}
25004		v.reset(OpAMD64CMOVWGTF)
25005		v.AddArg3(y, x, cond)
25006		return true
25007	}
25008	// match: (CondSelect <t> x y (SETGEF cond))
25009	// cond: is16BitInt(t)
25010	// result: (CMOVWGEF y x cond)
25011	for {
25012		t := v.Type
25013		x := v_0
25014		y := v_1
25015		if v_2.Op != OpAMD64SETGEF {
25016			break
25017		}
25018		cond := v_2.Args[0]
25019		if !(is16BitInt(t)) {
25020			break
25021		}
25022		v.reset(OpAMD64CMOVWGEF)
25023		v.AddArg3(y, x, cond)
25024		return true
25025	}
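	// The rules below handle a CondSelect whose condition is an ordinary
	// boolean value rather than a flags value. A 1-, 2-, or 4-byte check is
	// first zero-extended to 64 bits (MOVBQZX/MOVWQZX/MOVLQZX) and the
	// CondSelect re-queued; an 8-byte check then compares against zero and
	// selects with a CMOV*NE sized to the result type. In effect:
	//
	//	if check != 0 { result = x } else { result = y }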
25026	// match: (CondSelect <t> x y check)
25027	// cond: !check.Type.IsFlags() && check.Type.Size() == 1
25028	// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
25029	for {
25030		t := v.Type
25031		x := v_0
25032		y := v_1
25033		check := v_2
25034		if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
25035			break
25036		}
25037		v.reset(OpCondSelect)
25038		v.Type = t
25039		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
25040		v0.AddArg(check)
25041		v.AddArg3(x, y, v0)
25042		return true
25043	}
25044	// match: (CondSelect <t> x y check)
25045	// cond: !check.Type.IsFlags() && check.Type.Size() == 2
25046	// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
25047	for {
25048		t := v.Type
25049		x := v_0
25050		y := v_1
25051		check := v_2
25052		if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
25053			break
25054		}
25055		v.reset(OpCondSelect)
25056		v.Type = t
25057		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
25058		v0.AddArg(check)
25059		v.AddArg3(x, y, v0)
25060		return true
25061	}
25062	// match: (CondSelect <t> x y check)
25063	// cond: !check.Type.IsFlags() && check.Type.Size() == 4
25064	// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
25065	for {
25066		t := v.Type
25067		x := v_0
25068		y := v_1
25069		check := v_2
25070		if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
25071			break
25072		}
25073		v.reset(OpCondSelect)
25074		v.Type = t
25075		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
25076		v0.AddArg(check)
25077		v.AddArg3(x, y, v0)
25078		return true
25079	}
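	// Once the check is a full 8 bytes wide, materialize the flags with
	// CMPQconst [0] and pick the CMOV width from the result type.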
25080	// match: (CondSelect <t> x y check)
25081	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
25082	// result: (CMOVQNE y x (CMPQconst [0] check))
25083	for {
25084		t := v.Type
25085		x := v_0
25086		y := v_1
25087		check := v_2
25088		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
25089			break
25090		}
25091		v.reset(OpAMD64CMOVQNE)
25092		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25093		v0.AuxInt = int32ToAuxInt(0)
25094		v0.AddArg(check)
25095		v.AddArg3(y, x, v0)
25096		return true
25097	}
25098	// match: (CondSelect <t> x y check)
25099	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
25100	// result: (CMOVLNE y x (CMPQconst [0] check))
25101	for {
25102		t := v.Type
25103		x := v_0
25104		y := v_1
25105		check := v_2
25106		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
25107			break
25108		}
25109		v.reset(OpAMD64CMOVLNE)
25110		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25111		v0.AuxInt = int32ToAuxInt(0)
25112		v0.AddArg(check)
25113		v.AddArg3(y, x, v0)
25114		return true
25115	}
25116	// match: (CondSelect <t> x y check)
25117	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
25118	// result: (CMOVWNE y x (CMPQconst [0] check))
25119	for {
25120		t := v.Type
25121		x := v_0
25122		y := v_1
25123		check := v_2
25124		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
25125			break
25126		}
25127		v.reset(OpAMD64CMOVWNE)
25128		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25129		v0.AuxInt = int32ToAuxInt(0)
25130		v0.AddArg(check)
25131		v.AddArg3(y, x, v0)
25132		return true
25133	}
25134	return false
25135}
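// Small integer constants are all materialized with MOVLconst: there are no
// 8- or 16-bit constant ops, and writing a 32-bit register zero-extends, so
// the 32-bit move is the shortest correct encoding. Only the nil pointer
// needs a full-width MOVQconst.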
25136func rewriteValueAMD64_OpConst16(v *Value) bool {
25137	// match: (Const16 [c])
25138	// result: (MOVLconst [int32(c)])
25139	for {
25140		c := auxIntToInt16(v.AuxInt)
25141		v.reset(OpAMD64MOVLconst)
25142		v.AuxInt = int32ToAuxInt(int32(c))
25143		return true
25144	}
25145}
25146func rewriteValueAMD64_OpConst8(v *Value) bool {
25147	// match: (Const8 [c])
25148	// result: (MOVLconst [int32(c)])
25149	for {
25150		c := auxIntToInt8(v.AuxInt)
25151		v.reset(OpAMD64MOVLconst)
25152		v.AuxInt = int32ToAuxInt(int32(c))
25153		return true
25154	}
25155}
25156func rewriteValueAMD64_OpConstBool(v *Value) bool {
25157	// match: (ConstBool [c])
25158	// result: (MOVLconst [b2i32(c)])
25159	for {
25160		c := auxIntToBool(v.AuxInt)
25161		v.reset(OpAMD64MOVLconst)
25162		v.AuxInt = int32ToAuxInt(b2i32(c))
25163		return true
25164	}
25165}
25166func rewriteValueAMD64_OpConstNil(v *Value) bool {
25167	// match: (ConstNil)
25168	// result: (MOVQconst [0])
25169	for {
25170		v.reset(OpAMD64MOVQconst)
25171		v.AuxInt = int64ToAuxInt(0)
25172		return true
25173	}
25174}
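// Ctz16 lowering: BSF leaves its destination undefined when the source is
// zero, so the rule ORs in a sentinel bit just above the payload. For x == 0
// the first set bit is then bit 16, which is exactly Ctz16(0):
//
//	ctz16(x) = BSFL(x | 1<<16)
//
// Ctz8 below uses the same trick with 1<<8.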
25175func rewriteValueAMD64_OpCtz16(v *Value) bool {
25176	v_0 := v.Args[0]
25177	b := v.Block
25178	typ := &b.Func.Config.Types
25179	// match: (Ctz16 x)
25180	// result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
25181	for {
25182		x := v_0
25183		v.reset(OpAMD64BSFL)
25184		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
25185		v0.AuxInt = int32ToAuxInt(1 << 16)
25186		v0.AddArg(x)
25187		v.AddArg(v0)
25188		return true
25189	}
25190}
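// The Ctz*NonZero variants may assume a nonzero input, so no sentinel bit is
// needed: TZCNT (BMI1, guaranteed at GOAMD64 >= v3) or plain BSF suffices.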
25191func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
25192	v_0 := v.Args[0]
25193	// match: (Ctz16NonZero x)
25194	// cond: buildcfg.GOAMD64 >= 3
25195	// result: (TZCNTL x)
25196	for {
25197		x := v_0
25198		if !(buildcfg.GOAMD64 >= 3) {
25199			break
25200		}
25201		v.reset(OpAMD64TZCNTL)
25202		v.AddArg(x)
25203		return true
25204	}
25205	// match: (Ctz16NonZero x)
25206	// cond: buildcfg.GOAMD64 < 3
25207	// result: (BSFL x)
25208	for {
25209		x := v_0
25210		if !(buildcfg.GOAMD64 < 3) {
25211			break
25212		}
25213		v.reset(OpAMD64BSFL)
25214		v.AddArg(x)
25215		return true
25216	}
25217	return false
25218}
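// Ctz32 without TZCNT works in 64-bit width instead of using a sentinel OR:
// BTSQconst [32] sets bit 32, so BSFQ always finds a set bit and yields 32
// when the low 32 bits are zero.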
25219func rewriteValueAMD64_OpCtz32(v *Value) bool {
25220	v_0 := v.Args[0]
25221	b := v.Block
25222	typ := &b.Func.Config.Types
25223	// match: (Ctz32 x)
25224	// cond: buildcfg.GOAMD64 >= 3
25225	// result: (TZCNTL x)
25226	for {
25227		x := v_0
25228		if !(buildcfg.GOAMD64 >= 3) {
25229			break
25230		}
25231		v.reset(OpAMD64TZCNTL)
25232		v.AddArg(x)
25233		return true
25234	}
25235	// match: (Ctz32 x)
25236	// cond: buildcfg.GOAMD64 < 3
25237	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
25238	for {
25239		x := v_0
25240		if !(buildcfg.GOAMD64 < 3) {
25241			break
25242		}
25243		v.reset(OpSelect0)
25244		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25245		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
25246		v1.AuxInt = int8ToAuxInt(32)
25247		v1.AddArg(x)
25248		v0.AddArg(v1)
25249		v.AddArg(v0)
25250		return true
25251	}
25252	return false
25253}
25254func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
25255	v_0 := v.Args[0]
25256	// match: (Ctz32NonZero x)
25257	// cond: buildcfg.GOAMD64 >= 3
25258	// result: (TZCNTL x)
25259	for {
25260		x := v_0
25261		if !(buildcfg.GOAMD64 >= 3) {
25262			break
25263		}
25264		v.reset(OpAMD64TZCNTL)
25265		v.AddArg(x)
25266		return true
25267	}
25268	// match: (Ctz32NonZero x)
25269	// cond: buildcfg.GOAMD64 < 3
25270	// result: (BSFL x)
25271	for {
25272		x := v_0
25273		if !(buildcfg.GOAMD64 < 3) {
25274			break
25275		}
25276		v.reset(OpAMD64BSFL)
25277		v.AddArg(x)
25278		return true
25279	}
25280	return false
25281}
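// Ctz64 has no spare bit to use as a sentinel. BSFQ sets ZF when its input
// is zero, so the rule selects the constant 64 off the BSFQ flags result,
// roughly:
//
//	q, flags := BSFQ(x)
//	return CMOVQEQ(q, 64, flags) // 64 when x == 0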
25282func rewriteValueAMD64_OpCtz64(v *Value) bool {
25283	v_0 := v.Args[0]
25284	b := v.Block
25285	typ := &b.Func.Config.Types
25286	// match: (Ctz64 x)
25287	// cond: buildcfg.GOAMD64 >= 3
25288	// result: (TZCNTQ x)
25289	for {
25290		x := v_0
25291		if !(buildcfg.GOAMD64 >= 3) {
25292			break
25293		}
25294		v.reset(OpAMD64TZCNTQ)
25295		v.AddArg(x)
25296		return true
25297	}
25298	// match: (Ctz64 <t> x)
25299	// cond: buildcfg.GOAMD64 < 3
25300	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
25301	for {
25302		t := v.Type
25303		x := v_0
25304		if !(buildcfg.GOAMD64 < 3) {
25305			break
25306		}
25307		v.reset(OpAMD64CMOVQEQ)
25308		v0 := b.NewValue0(v.Pos, OpSelect0, t)
25309		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25310		v1.AddArg(x)
25311		v0.AddArg(v1)
25312		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
25313		v2.AuxInt = int64ToAuxInt(64)
25314		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
25315		v3.AddArg(v1)
25316		v.AddArg3(v0, v2, v3)
25317		return true
25318	}
25319	return false
25320}
25321func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
25322	v_0 := v.Args[0]
25323	b := v.Block
25324	typ := &b.Func.Config.Types
25325	// match: (Ctz64NonZero x)
25326	// cond: buildcfg.GOAMD64 >= 3
25327	// result: (TZCNTQ x)
25328	for {
25329		x := v_0
25330		if !(buildcfg.GOAMD64 >= 3) {
25331			break
25332		}
25333		v.reset(OpAMD64TZCNTQ)
25334		v.AddArg(x)
25335		return true
25336	}
25337	// match: (Ctz64NonZero x)
25338	// cond: buildcfg.GOAMD64 < 3
25339	// result: (Select0 (BSFQ x))
25340	for {
25341		x := v_0
25342		if !(buildcfg.GOAMD64 < 3) {
25343			break
25344		}
25345		v.reset(OpSelect0)
25346		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25347		v0.AddArg(x)
25348		v.AddArg(v0)
25349		return true
25350	}
25351	return false
25352}
25353func rewriteValueAMD64_OpCtz8(v *Value) bool {
25354	v_0 := v.Args[0]
25355	b := v.Block
25356	typ := &b.Func.Config.Types
25357	// match: (Ctz8 x)
25358	// result: (BSFL (ORLconst <typ.UInt32> [1<<8] x))
25359	for {
25360		x := v_0
25361		v.reset(OpAMD64BSFL)
25362		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
25363		v0.AuxInt = int32ToAuxInt(1 << 8)
25364		v0.AddArg(x)
25365		v.AddArg(v0)
25366		return true
25367	}
25368}
25369func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
25370	v_0 := v.Args[0]
25371	// match: (Ctz8NonZero x)
25372	// cond: buildcfg.GOAMD64 >= 3
25373	// result: (TZCNTL x)
25374	for {
25375		x := v_0
25376		if !(buildcfg.GOAMD64 >= 3) {
25377			break
25378		}
25379		v.reset(OpAMD64TZCNTL)
25380		v.AddArg(x)
25381		return true
25382	}
25383	// match: (Ctz8NonZero x)
25384	// cond: buildcfg.GOAMD64 < 3
25385	// result: (BSFL x)
25386	for {
25387		x := v_0
25388		if !(buildcfg.GOAMD64 < 3) {
25389			break
25390		}
25391		v.reset(OpAMD64BSFL)
25392		v.AddArg(x)
25393		return true
25394	}
25395	return false
25396}
25397func rewriteValueAMD64_OpDiv16(v *Value) bool {
25398	v_1 := v.Args[1]
25399	v_0 := v.Args[0]
25400	b := v.Block
25401	typ := &b.Func.Config.Types
25402	// match: (Div16 [a] x y)
25403	// result: (Select0 (DIVW [a] x y))
25404	for {
25405		a := auxIntToBool(v.AuxInt)
25406		x := v_0
25407		y := v_1
25408		v.reset(OpSelect0)
25409		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
25410		v0.AuxInt = boolToAuxInt(a)
25411		v0.AddArg2(x, y)
25412		v.AddArg(v0)
25413		return true
25414	}
25415}
25416func rewriteValueAMD64_OpDiv16u(v *Value) bool {
25417	v_1 := v.Args[1]
25418	v_0 := v.Args[0]
25419	b := v.Block
25420	typ := &b.Func.Config.Types
25421	// match: (Div16u x y)
25422	// result: (Select0 (DIVWU x y))
25423	for {
25424		x := v_0
25425		y := v_1
25426		v.reset(OpSelect0)
25427		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
25428		v0.AddArg2(x, y)
25429		v.AddArg(v0)
25430		return true
25431	}
25432}
25433func rewriteValueAMD64_OpDiv32(v *Value) bool {
25434	v_1 := v.Args[1]
25435	v_0 := v.Args[0]
25436	b := v.Block
25437	typ := &b.Func.Config.Types
25438	// match: (Div32 [a] x y)
25439	// result: (Select0 (DIVL [a] x y))
25440	for {
25441		a := auxIntToBool(v.AuxInt)
25442		x := v_0
25443		y := v_1
25444		v.reset(OpSelect0)
25445		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
25446		v0.AuxInt = boolToAuxInt(a)
25447		v0.AddArg2(x, y)
25448		v.AddArg(v0)
25449		return true
25450	}
25451}
25452func rewriteValueAMD64_OpDiv32u(v *Value) bool {
25453	v_1 := v.Args[1]
25454	v_0 := v.Args[0]
25455	b := v.Block
25456	typ := &b.Func.Config.Types
25457	// match: (Div32u x y)
25458	// result: (Select0 (DIVLU x y))
25459	for {
25460		x := v_0
25461		y := v_1
25462		v.reset(OpSelect0)
25463		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
25464		v0.AddArg2(x, y)
25465		v.AddArg(v0)
25466		return true
25467	}
25468}
25469func rewriteValueAMD64_OpDiv64(v *Value) bool {
25470	v_1 := v.Args[1]
25471	v_0 := v.Args[0]
25472	b := v.Block
25473	typ := &b.Func.Config.Types
25474	// match: (Div64 [a] x y)
25475	// result: (Select0 (DIVQ [a] x y))
25476	for {
25477		a := auxIntToBool(v.AuxInt)
25478		x := v_0
25479		y := v_1
25480		v.reset(OpSelect0)
25481		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
25482		v0.AuxInt = boolToAuxInt(a)
25483		v0.AddArg2(x, y)
25484		v.AddArg(v0)
25485		return true
25486	}
25487}
25488func rewriteValueAMD64_OpDiv64u(v *Value) bool {
25489	v_1 := v.Args[1]
25490	v_0 := v.Args[0]
25491	b := v.Block
25492	typ := &b.Func.Config.Types
25493	// match: (Div64u x y)
25494	// result: (Select0 (DIVQU x y))
25495	for {
25496		x := v_0
25497		y := v_1
25498		v.reset(OpSelect0)
25499		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
25500		v0.AddArg2(x, y)
25501		v.AddArg(v0)
25502		return true
25503	}
25504}
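// 8-bit division is lowered by sign- or zero-extending both operands to 16
// bits and reusing the 16-bit divide; the tuple result carries the quotient
// in Select0 (and the remainder in Select1, used by Mod8/Mod8u below).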
25505func rewriteValueAMD64_OpDiv8(v *Value) bool {
25506	v_1 := v.Args[1]
25507	v_0 := v.Args[0]
25508	b := v.Block
25509	typ := &b.Func.Config.Types
25510	// match: (Div8 x y)
25511	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
25512	for {
25513		x := v_0
25514		y := v_1
25515		v.reset(OpSelect0)
25516		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
25517		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
25518		v1.AddArg(x)
25519		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
25520		v2.AddArg(y)
25521		v0.AddArg2(v1, v2)
25522		v.AddArg(v0)
25523		return true
25524	}
25525}
25526func rewriteValueAMD64_OpDiv8u(v *Value) bool {
25527	v_1 := v.Args[1]
25528	v_0 := v.Args[0]
25529	b := v.Block
25530	typ := &b.Func.Config.Types
25531	// match: (Div8u x y)
25532	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
25533	for {
25534		x := v_0
25535		y := v_1
25536		v.reset(OpSelect0)
25537		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
25538		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
25539		v1.AddArg(x)
25540		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
25541		v2.AddArg(y)
25542		v0.AddArg2(v1, v2)
25543		v.AddArg(v0)
25544		return true
25545	}
25546}
25547func rewriteValueAMD64_OpEq16(v *Value) bool {
25548	v_1 := v.Args[1]
25549	v_0 := v.Args[0]
25550	b := v.Block
25551	// match: (Eq16 x y)
25552	// result: (SETEQ (CMPW x y))
25553	for {
25554		x := v_0
25555		y := v_1
25556		v.reset(OpAMD64SETEQ)
25557		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25558		v0.AddArg2(x, y)
25559		v.AddArg(v0)
25560		return true
25561	}
25562}
25563func rewriteValueAMD64_OpEq32(v *Value) bool {
25564	v_1 := v.Args[1]
25565	v_0 := v.Args[0]
25566	b := v.Block
25567	// match: (Eq32 x y)
25568	// result: (SETEQ (CMPL x y))
25569	for {
25570		x := v_0
25571		y := v_1
25572		v.reset(OpAMD64SETEQ)
25573		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25574		v0.AddArg2(x, y)
25575		v.AddArg(v0)
25576		return true
25577	}
25578}
25579func rewriteValueAMD64_OpEq32F(v *Value) bool {
25580	v_1 := v.Args[1]
25581	v_0 := v.Args[0]
25582	b := v.Block
25583	// match: (Eq32F x y)
25584	// result: (SETEQF (UCOMISS x y))
25585	for {
25586		x := v_0
25587		y := v_1
25588		v.reset(OpAMD64SETEQF)
25589		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
25590		v0.AddArg2(x, y)
25591		v.AddArg(v0)
25592		return true
25593	}
25594}
25595func rewriteValueAMD64_OpEq64(v *Value) bool {
25596	v_1 := v.Args[1]
25597	v_0 := v.Args[0]
25598	b := v.Block
25599	// match: (Eq64 x y)
25600	// result: (SETEQ (CMPQ x y))
25601	for {
25602		x := v_0
25603		y := v_1
25604		v.reset(OpAMD64SETEQ)
25605		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25606		v0.AddArg2(x, y)
25607		v.AddArg(v0)
25608		return true
25609	}
25610}
25611func rewriteValueAMD64_OpEq64F(v *Value) bool {
25612	v_1 := v.Args[1]
25613	v_0 := v.Args[0]
25614	b := v.Block
25615	// match: (Eq64F x y)
25616	// result: (SETEQF (UCOMISD x y))
25617	for {
25618		x := v_0
25619		y := v_1
25620		v.reset(OpAMD64SETEQF)
25621		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
25622		v0.AddArg2(x, y)
25623		v.AddArg(v0)
25624		return true
25625	}
25626}
25627func rewriteValueAMD64_OpEq8(v *Value) bool {
25628	v_1 := v.Args[1]
25629	v_0 := v.Args[0]
25630	b := v.Block
25631	// match: (Eq8 x y)
25632	// result: (SETEQ (CMPB x y))
25633	for {
25634		x := v_0
25635		y := v_1
25636		v.reset(OpAMD64SETEQ)
25637		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25638		v0.AddArg2(x, y)
25639		v.AddArg(v0)
25640		return true
25641	}
25642}
25643func rewriteValueAMD64_OpEqB(v *Value) bool {
25644	v_1 := v.Args[1]
25645	v_0 := v.Args[0]
25646	b := v.Block
25647	// match: (EqB x y)
25648	// result: (SETEQ (CMPB x y))
25649	for {
25650		x := v_0
25651		y := v_1
25652		v.reset(OpAMD64SETEQ)
25653		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25654		v0.AddArg2(x, y)
25655		v.AddArg(v0)
25656		return true
25657	}
25658}
25659func rewriteValueAMD64_OpEqPtr(v *Value) bool {
25660	v_1 := v.Args[1]
25661	v_0 := v.Args[0]
25662	b := v.Block
25663	// match: (EqPtr x y)
25664	// result: (SETEQ (CMPQ x y))
25665	for {
25666		x := v_0
25667		y := v_1
25668		v.reset(OpAMD64SETEQ)
25669		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25670		v0.AddArg2(x, y)
25671		v.AddArg(v0)
25672		return true
25673	}
25674}
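// FMA: the VFMADD231 form multiplies its second and third operands and adds
// the first, so (FMA x y z) = x*y + z places z in the accumulator slot.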
25675func rewriteValueAMD64_OpFMA(v *Value) bool {
25676	v_2 := v.Args[2]
25677	v_1 := v.Args[1]
25678	v_0 := v.Args[0]
25679	// match: (FMA x y z)
25680	// result: (VFMADD231SD z x y)
25681	for {
25682		x := v_0
25683		y := v_1
25684		z := v_2
25685		v.reset(OpAMD64VFMADD231SD)
25686		v.AddArg3(z, x, y)
25687		return true
25688	}
25689}
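// ROUNDSD's immediate selects the rounding mode: 0 = nearest even,
// 1 = toward -inf (floor), 2 = toward +inf (ceil), 3 = toward zero (trunc).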
25690func rewriteValueAMD64_OpFloor(v *Value) bool {
25691	v_0 := v.Args[0]
25692	// match: (Floor x)
25693	// result: (ROUNDSD [1] x)
25694	for {
25695		x := v_0
25696		v.reset(OpAMD64ROUNDSD)
25697		v.AuxInt = int8ToAuxInt(1)
25698		v.AddArg(x)
25699		return true
25700	}
25701}
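// Under the internal register ABI the g pointer lives in R14, so the
// LoweredGetG TLS sequence is only needed for functions using other ABIs.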
25702func rewriteValueAMD64_OpGetG(v *Value) bool {
25703	v_0 := v.Args[0]
25704	// match: (GetG mem)
25705	// cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
25706	// result: (LoweredGetG mem)
25707	for {
25708		mem := v_0
25709		if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
25710			break
25711		}
25712		v.reset(OpAMD64LoweredGetG)
25713		v.AddArg(mem)
25714		return true
25715	}
25716	return false
25717}
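// HasCPUFeature loads a runtime-initialized feature variable (named by the
// aux symbol) and converts it to a bool by comparing it against zero.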
25718func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
25719	b := v.Block
25720	typ := &b.Func.Config.Types
25721	// match: (HasCPUFeature {s})
25722	// result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
25723	for {
25724		s := auxToSym(v.Aux)
25725		v.reset(OpAMD64SETNE)
25726		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
25727		v0.AuxInt = int32ToAuxInt(0)
25728		v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
25729		v1.Aux = symToAux(s)
25730		v0.AddArg(v1)
25731		v.AddArg(v0)
25732		return true
25733	}
25734}
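// Bounds checks compile to a single unsigned compare: a negative index,
// viewed as unsigned, exceeds any valid length, so SETB (unsigned <) covers
// both idx < 0 and idx >= len; IsSliceInBounds below uses SETBE for
// idx <= len.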
25735func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
25736	v_1 := v.Args[1]
25737	v_0 := v.Args[0]
25738	b := v.Block
25739	// match: (IsInBounds idx len)
25740	// result: (SETB (CMPQ idx len))
25741	for {
25742		idx := v_0
25743		len := v_1
25744		v.reset(OpAMD64SETB)
25745		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25746		v0.AddArg2(idx, len)
25747		v.AddArg(v0)
25748		return true
25749	}
25750}
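// TESTQ p p ANDs the pointer with itself, setting ZF exactly when p is zero;
// SETNE then materializes p != nil without needing a constant operand.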
25751func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
25752	v_0 := v.Args[0]
25753	b := v.Block
25754	// match: (IsNonNil p)
25755	// result: (SETNE (TESTQ p p))
25756	for {
25757		p := v_0
25758		v.reset(OpAMD64SETNE)
25759		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
25760		v0.AddArg2(p, p)
25761		v.AddArg(v0)
25762		return true
25763	}
25764}
25765func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
25766	v_1 := v.Args[1]
25767	v_0 := v.Args[0]
25768	b := v.Block
25769	// match: (IsSliceInBounds idx len)
25770	// result: (SETBE (CMPQ idx len))
25771	for {
25772		idx := v_0
25773		len := v_1
25774		v.reset(OpAMD64SETBE)
25775		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25776		v0.AddArg2(idx, len)
25777		v.AddArg(v0)
25778		return true
25779	}
25780}
25781func rewriteValueAMD64_OpLeq16(v *Value) bool {
25782	v_1 := v.Args[1]
25783	v_0 := v.Args[0]
25784	b := v.Block
25785	// match: (Leq16 x y)
25786	// result: (SETLE (CMPW x y))
25787	for {
25788		x := v_0
25789		y := v_1
25790		v.reset(OpAMD64SETLE)
25791		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25792		v0.AddArg2(x, y)
25793		v.AddArg(v0)
25794		return true
25795	}
25796}
25797func rewriteValueAMD64_OpLeq16U(v *Value) bool {
25798	v_1 := v.Args[1]
25799	v_0 := v.Args[0]
25800	b := v.Block
25801	// match: (Leq16U x y)
25802	// result: (SETBE (CMPW x y))
25803	for {
25804		x := v_0
25805		y := v_1
25806		v.reset(OpAMD64SETBE)
25807		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25808		v0.AddArg2(x, y)
25809		v.AddArg(v0)
25810		return true
25811	}
25812}
25813func rewriteValueAMD64_OpLeq32(v *Value) bool {
25814	v_1 := v.Args[1]
25815	v_0 := v.Args[0]
25816	b := v.Block
25817	// match: (Leq32 x y)
25818	// result: (SETLE (CMPL x y))
25819	for {
25820		x := v_0
25821		y := v_1
25822		v.reset(OpAMD64SETLE)
25823		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25824		v0.AddArg2(x, y)
25825		v.AddArg(v0)
25826		return true
25827	}
25828}
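// Floating-point orderings lower only through the "greater" flavors: x <= y
// is emitted as y >= x (SETGEF over UCOMISS with swapped operands), and
// likewise x < y as y > x in Less32F/Less64F below, so an unordered (NaN)
// comparison comes out false from the flags UCOMISS produces.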
25829func rewriteValueAMD64_OpLeq32F(v *Value) bool {
25830	v_1 := v.Args[1]
25831	v_0 := v.Args[0]
25832	b := v.Block
25833	// match: (Leq32F x y)
25834	// result: (SETGEF (UCOMISS y x))
25835	for {
25836		x := v_0
25837		y := v_1
25838		v.reset(OpAMD64SETGEF)
25839		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
25840		v0.AddArg2(y, x)
25841		v.AddArg(v0)
25842		return true
25843	}
25844}
25845func rewriteValueAMD64_OpLeq32U(v *Value) bool {
25846	v_1 := v.Args[1]
25847	v_0 := v.Args[0]
25848	b := v.Block
25849	// match: (Leq32U x y)
25850	// result: (SETBE (CMPL x y))
25851	for {
25852		x := v_0
25853		y := v_1
25854		v.reset(OpAMD64SETBE)
25855		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25856		v0.AddArg2(x, y)
25857		v.AddArg(v0)
25858		return true
25859	}
25860}
25861func rewriteValueAMD64_OpLeq64(v *Value) bool {
25862	v_1 := v.Args[1]
25863	v_0 := v.Args[0]
25864	b := v.Block
25865	// match: (Leq64 x y)
25866	// result: (SETLE (CMPQ x y))
25867	for {
25868		x := v_0
25869		y := v_1
25870		v.reset(OpAMD64SETLE)
25871		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25872		v0.AddArg2(x, y)
25873		v.AddArg(v0)
25874		return true
25875	}
25876}
25877func rewriteValueAMD64_OpLeq64F(v *Value) bool {
25878	v_1 := v.Args[1]
25879	v_0 := v.Args[0]
25880	b := v.Block
25881	// match: (Leq64F x y)
25882	// result: (SETGEF (UCOMISD y x))
25883	for {
25884		x := v_0
25885		y := v_1
25886		v.reset(OpAMD64SETGEF)
25887		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
25888		v0.AddArg2(y, x)
25889		v.AddArg(v0)
25890		return true
25891	}
25892}
25893func rewriteValueAMD64_OpLeq64U(v *Value) bool {
25894	v_1 := v.Args[1]
25895	v_0 := v.Args[0]
25896	b := v.Block
25897	// match: (Leq64U x y)
25898	// result: (SETBE (CMPQ x y))
25899	for {
25900		x := v_0
25901		y := v_1
25902		v.reset(OpAMD64SETBE)
25903		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25904		v0.AddArg2(x, y)
25905		v.AddArg(v0)
25906		return true
25907	}
25908}
25909func rewriteValueAMD64_OpLeq8(v *Value) bool {
25910	v_1 := v.Args[1]
25911	v_0 := v.Args[0]
25912	b := v.Block
25913	// match: (Leq8 x y)
25914	// result: (SETLE (CMPB x y))
25915	for {
25916		x := v_0
25917		y := v_1
25918		v.reset(OpAMD64SETLE)
25919		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25920		v0.AddArg2(x, y)
25921		v.AddArg(v0)
25922		return true
25923	}
25924}
25925func rewriteValueAMD64_OpLeq8U(v *Value) bool {
25926	v_1 := v.Args[1]
25927	v_0 := v.Args[0]
25928	b := v.Block
25929	// match: (Leq8U x y)
25930	// result: (SETBE (CMPB x y))
25931	for {
25932		x := v_0
25933		y := v_1
25934		v.reset(OpAMD64SETBE)
25935		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25936		v0.AddArg2(x, y)
25937		v.AddArg(v0)
25938		return true
25939	}
25940}
25941func rewriteValueAMD64_OpLess16(v *Value) bool {
25942	v_1 := v.Args[1]
25943	v_0 := v.Args[0]
25944	b := v.Block
25945	// match: (Less16 x y)
25946	// result: (SETL (CMPW x y))
25947	for {
25948		x := v_0
25949		y := v_1
25950		v.reset(OpAMD64SETL)
25951		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25952		v0.AddArg2(x, y)
25953		v.AddArg(v0)
25954		return true
25955	}
25956}
25957func rewriteValueAMD64_OpLess16U(v *Value) bool {
25958	v_1 := v.Args[1]
25959	v_0 := v.Args[0]
25960	b := v.Block
25961	// match: (Less16U x y)
25962	// result: (SETB (CMPW x y))
25963	for {
25964		x := v_0
25965		y := v_1
25966		v.reset(OpAMD64SETB)
25967		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25968		v0.AddArg2(x, y)
25969		v.AddArg(v0)
25970		return true
25971	}
25972}
25973func rewriteValueAMD64_OpLess32(v *Value) bool {
25974	v_1 := v.Args[1]
25975	v_0 := v.Args[0]
25976	b := v.Block
25977	// match: (Less32 x y)
25978	// result: (SETL (CMPL x y))
25979	for {
25980		x := v_0
25981		y := v_1
25982		v.reset(OpAMD64SETL)
25983		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25984		v0.AddArg2(x, y)
25985		v.AddArg(v0)
25986		return true
25987	}
25988}
25989func rewriteValueAMD64_OpLess32F(v *Value) bool {
25990	v_1 := v.Args[1]
25991	v_0 := v.Args[0]
25992	b := v.Block
25993	// match: (Less32F x y)
25994	// result: (SETGF (UCOMISS y x))
25995	for {
25996		x := v_0
25997		y := v_1
25998		v.reset(OpAMD64SETGF)
25999		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
26000		v0.AddArg2(y, x)
26001		v.AddArg(v0)
26002		return true
26003	}
26004}
26005func rewriteValueAMD64_OpLess32U(v *Value) bool {
26006	v_1 := v.Args[1]
26007	v_0 := v.Args[0]
26008	b := v.Block
26009	// match: (Less32U x y)
26010	// result: (SETB (CMPL x y))
26011	for {
26012		x := v_0
26013		y := v_1
26014		v.reset(OpAMD64SETB)
26015		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
26016		v0.AddArg2(x, y)
26017		v.AddArg(v0)
26018		return true
26019	}
26020}
26021func rewriteValueAMD64_OpLess64(v *Value) bool {
26022	v_1 := v.Args[1]
26023	v_0 := v.Args[0]
26024	b := v.Block
26025	// match: (Less64 x y)
26026	// result: (SETL (CMPQ x y))
26027	for {
26028		x := v_0
26029		y := v_1
26030		v.reset(OpAMD64SETL)
26031		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26032		v0.AddArg2(x, y)
26033		v.AddArg(v0)
26034		return true
26035	}
26036}
26037func rewriteValueAMD64_OpLess64F(v *Value) bool {
26038	v_1 := v.Args[1]
26039	v_0 := v.Args[0]
26040	b := v.Block
26041	// match: (Less64F x y)
26042	// result: (SETGF (UCOMISD y x))
26043	for {
26044		x := v_0
26045		y := v_1
26046		v.reset(OpAMD64SETGF)
26047		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
26048		v0.AddArg2(y, x)
26049		v.AddArg(v0)
26050		return true
26051	}
26052}
26053func rewriteValueAMD64_OpLess64U(v *Value) bool {
26054	v_1 := v.Args[1]
26055	v_0 := v.Args[0]
26056	b := v.Block
26057	// match: (Less64U x y)
26058	// result: (SETB (CMPQ x y))
26059	for {
26060		x := v_0
26061		y := v_1
26062		v.reset(OpAMD64SETB)
26063		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26064		v0.AddArg2(x, y)
26065		v.AddArg(v0)
26066		return true
26067	}
26068}
26069func rewriteValueAMD64_OpLess8(v *Value) bool {
26070	v_1 := v.Args[1]
26071	v_0 := v.Args[0]
26072	b := v.Block
26073	// match: (Less8 x y)
26074	// result: (SETL (CMPB x y))
26075	for {
26076		x := v_0
26077		y := v_1
26078		v.reset(OpAMD64SETL)
26079		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26080		v0.AddArg2(x, y)
26081		v.AddArg(v0)
26082		return true
26083	}
26084}
26085func rewriteValueAMD64_OpLess8U(v *Value) bool {
26086	v_1 := v.Args[1]
26087	v_0 := v.Args[0]
26088	b := v.Block
26089	// match: (Less8U x y)
26090	// result: (SETB (CMPB x y))
26091	for {
26092		x := v_0
26093		y := v_1
26094		v.reset(OpAMD64SETB)
26095		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26096		v0.AddArg2(x, y)
26097		v.AddArg(v0)
26098		return true
26099	}
26100}
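// Load dispatches on the type alone: 64-bit integers and pointers share
// MOVQload, booleans share MOVBload with 8-bit integers, and each remaining
// width picks the matching sized integer or float load.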
26101func rewriteValueAMD64_OpLoad(v *Value) bool {
26102	v_1 := v.Args[1]
26103	v_0 := v.Args[0]
26104	// match: (Load <t> ptr mem)
26105	// cond: (is64BitInt(t) || isPtr(t))
26106	// result: (MOVQload ptr mem)
26107	for {
26108		t := v.Type
26109		ptr := v_0
26110		mem := v_1
26111		if !(is64BitInt(t) || isPtr(t)) {
26112			break
26113		}
26114		v.reset(OpAMD64MOVQload)
26115		v.AddArg2(ptr, mem)
26116		return true
26117	}
26118	// match: (Load <t> ptr mem)
26119	// cond: is32BitInt(t)
26120	// result: (MOVLload ptr mem)
26121	for {
26122		t := v.Type
26123		ptr := v_0
26124		mem := v_1
26125		if !(is32BitInt(t)) {
26126			break
26127		}
26128		v.reset(OpAMD64MOVLload)
26129		v.AddArg2(ptr, mem)
26130		return true
26131	}
26132	// match: (Load <t> ptr mem)
26133	// cond: is16BitInt(t)
26134	// result: (MOVWload ptr mem)
26135	for {
26136		t := v.Type
26137		ptr := v_0
26138		mem := v_1
26139		if !(is16BitInt(t)) {
26140			break
26141		}
26142		v.reset(OpAMD64MOVWload)
26143		v.AddArg2(ptr, mem)
26144		return true
26145	}
26146	// match: (Load <t> ptr mem)
26147	// cond: (t.IsBoolean() || is8BitInt(t))
26148	// result: (MOVBload ptr mem)
26149	for {
26150		t := v.Type
26151		ptr := v_0
26152		mem := v_1
26153		if !(t.IsBoolean() || is8BitInt(t)) {
26154			break
26155		}
26156		v.reset(OpAMD64MOVBload)
26157		v.AddArg2(ptr, mem)
26158		return true
26159	}
26160	// match: (Load <t> ptr mem)
26161	// cond: is32BitFloat(t)
26162	// result: (MOVSSload ptr mem)
26163	for {
26164		t := v.Type
26165		ptr := v_0
26166		mem := v_1
26167		if !(is32BitFloat(t)) {
26168			break
26169		}
26170		v.reset(OpAMD64MOVSSload)
26171		v.AddArg2(ptr, mem)
26172		return true
26173	}
26174	// match: (Load <t> ptr mem)
26175	// cond: is64BitFloat(t)
26176	// result: (MOVSDload ptr mem)
26177	for {
26178		t := v.Type
26179		ptr := v_0
26180		mem := v_1
26181		if !(is64BitFloat(t)) {
26182			break
26183		}
26184		v.reset(OpAMD64MOVSDload)
26185		v.AddArg2(ptr, mem)
26186		return true
26187	}
26188	return false
26189}
26190func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
26191	v_1 := v.Args[1]
26192	v_0 := v.Args[0]
26193	b := v.Block
26194	typ := &b.Func.Config.Types
26195	// match: (LocalAddr <t> {sym} base mem)
26196	// cond: t.Elem().HasPointers()
26197	// result: (LEAQ {sym} (SPanchored base mem))
26198	for {
26199		t := v.Type
26200		sym := auxToSym(v.Aux)
26201		base := v_0
26202		mem := v_1
26203		if !(t.Elem().HasPointers()) {
26204			break
26205		}
26206		v.reset(OpAMD64LEAQ)
26207		v.Aux = symToAux(sym)
26208		v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
26209		v0.AddArg2(base, mem)
26210		v.AddArg(v0)
26211		return true
26212	}
26213	// match: (LocalAddr <t> {sym} base _)
26214	// cond: !t.Elem().HasPointers()
26215	// result: (LEAQ {sym} base)
26216	for {
26217		t := v.Type
26218		sym := auxToSym(v.Aux)
26219		base := v_0
26220		if !(!t.Elem().HasPointers()) {
26221			break
26222		}
26223		v.reset(OpAMD64LEAQ)
26224		v.Aux = symToAux(sym)
26225		v.AddArg(base)
26226		return true
26227	}
26228	return false
26229}
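// The unbounded shift rules below all share one idiom: compute the shift,
// then AND it with SBB(L|Q)carrymask of a compare of the count against the
// operand width, which is all ones when the count is in range and all zeros
// otherwise, so oversized counts yield 0 rather than the CPU's count-mod-32
// (or mod-64) behavior. For 8- and 16-bit shifts the bound checked is still
// 32: SHLL already leaves the low bits zero for counts 16..31, so only
// counts >= 32 need masking. When shiftIsBounded proves the count in range,
// the bare shift instruction is used instead.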
26230func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
26231	v_1 := v.Args[1]
26232	v_0 := v.Args[0]
26233	b := v.Block
26234	// match: (Lsh16x16 <t> x y)
26235	// cond: !shiftIsBounded(v)
26236	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
26237	for {
26238		t := v.Type
26239		x := v_0
26240		y := v_1
26241		if !(!shiftIsBounded(v)) {
26242			break
26243		}
26244		v.reset(OpAMD64ANDL)
26245		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26246		v0.AddArg2(x, y)
26247		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26248		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26249		v2.AuxInt = int16ToAuxInt(32)
26250		v2.AddArg(y)
26251		v1.AddArg(v2)
26252		v.AddArg2(v0, v1)
26253		return true
26254	}
26255	// match: (Lsh16x16 x y)
26256	// cond: shiftIsBounded(v)
26257	// result: (SHLL x y)
26258	for {
26259		x := v_0
26260		y := v_1
26261		if !(shiftIsBounded(v)) {
26262			break
26263		}
26264		v.reset(OpAMD64SHLL)
26265		v.AddArg2(x, y)
26266		return true
26267	}
26268	return false
26269}
26270func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
26271	v_1 := v.Args[1]
26272	v_0 := v.Args[0]
26273	b := v.Block
26274	// match: (Lsh16x32 <t> x y)
26275	// cond: !shiftIsBounded(v)
26276	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
26277	for {
26278		t := v.Type
26279		x := v_0
26280		y := v_1
26281		if !(!shiftIsBounded(v)) {
26282			break
26283		}
26284		v.reset(OpAMD64ANDL)
26285		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26286		v0.AddArg2(x, y)
26287		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26288		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26289		v2.AuxInt = int32ToAuxInt(32)
26290		v2.AddArg(y)
26291		v1.AddArg(v2)
26292		v.AddArg2(v0, v1)
26293		return true
26294	}
26295	// match: (Lsh16x32 x y)
26296	// cond: shiftIsBounded(v)
26297	// result: (SHLL x y)
26298	for {
26299		x := v_0
26300		y := v_1
26301		if !(shiftIsBounded(v)) {
26302			break
26303		}
26304		v.reset(OpAMD64SHLL)
26305		v.AddArg2(x, y)
26306		return true
26307	}
26308	return false
26309}
26310func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
26311	v_1 := v.Args[1]
26312	v_0 := v.Args[0]
26313	b := v.Block
26314	// match: (Lsh16x64 <t> x y)
26315	// cond: !shiftIsBounded(v)
26316	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
26317	for {
26318		t := v.Type
26319		x := v_0
26320		y := v_1
26321		if !(!shiftIsBounded(v)) {
26322			break
26323		}
26324		v.reset(OpAMD64ANDL)
26325		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26326		v0.AddArg2(x, y)
26327		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26328		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26329		v2.AuxInt = int32ToAuxInt(32)
26330		v2.AddArg(y)
26331		v1.AddArg(v2)
26332		v.AddArg2(v0, v1)
26333		return true
26334	}
26335	// match: (Lsh16x64 x y)
26336	// cond: shiftIsBounded(v)
26337	// result: (SHLL x y)
26338	for {
26339		x := v_0
26340		y := v_1
26341		if !(shiftIsBounded(v)) {
26342			break
26343		}
26344		v.reset(OpAMD64SHLL)
26345		v.AddArg2(x, y)
26346		return true
26347	}
26348	return false
26349}
26350func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
26351	v_1 := v.Args[1]
26352	v_0 := v.Args[0]
26353	b := v.Block
26354	// match: (Lsh16x8 <t> x y)
26355	// cond: !shiftIsBounded(v)
26356	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
26357	for {
26358		t := v.Type
26359		x := v_0
26360		y := v_1
26361		if !(!shiftIsBounded(v)) {
26362			break
26363		}
26364		v.reset(OpAMD64ANDL)
26365		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26366		v0.AddArg2(x, y)
26367		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26368		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26369		v2.AuxInt = int8ToAuxInt(32)
26370		v2.AddArg(y)
26371		v1.AddArg(v2)
26372		v.AddArg2(v0, v1)
26373		return true
26374	}
26375	// match: (Lsh16x8 x y)
26376	// cond: shiftIsBounded(v)
26377	// result: (SHLL x y)
26378	for {
26379		x := v_0
26380		y := v_1
26381		if !(shiftIsBounded(v)) {
26382			break
26383		}
26384		v.reset(OpAMD64SHLL)
26385		v.AddArg2(x, y)
26386		return true
26387	}
26388	return false
26389}
26390func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
26391	v_1 := v.Args[1]
26392	v_0 := v.Args[0]
26393	b := v.Block
26394	// match: (Lsh32x16 <t> x y)
26395	// cond: !shiftIsBounded(v)
26396	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
26397	for {
26398		t := v.Type
26399		x := v_0
26400		y := v_1
26401		if !(!shiftIsBounded(v)) {
26402			break
26403		}
26404		v.reset(OpAMD64ANDL)
26405		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26406		v0.AddArg2(x, y)
26407		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26408		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26409		v2.AuxInt = int16ToAuxInt(32)
26410		v2.AddArg(y)
26411		v1.AddArg(v2)
26412		v.AddArg2(v0, v1)
26413		return true
26414	}
26415	// match: (Lsh32x16 x y)
26416	// cond: shiftIsBounded(v)
26417	// result: (SHLL x y)
26418	for {
26419		x := v_0
26420		y := v_1
26421		if !(shiftIsBounded(v)) {
26422			break
26423		}
26424		v.reset(OpAMD64SHLL)
26425		v.AddArg2(x, y)
26426		return true
26427	}
26428	return false
26429}
26430func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
26431	v_1 := v.Args[1]
26432	v_0 := v.Args[0]
26433	b := v.Block
26434	// match: (Lsh32x32 <t> x y)
26435	// cond: !shiftIsBounded(v)
26436	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
26437	for {
26438		t := v.Type
26439		x := v_0
26440		y := v_1
26441		if !(!shiftIsBounded(v)) {
26442			break
26443		}
26444		v.reset(OpAMD64ANDL)
26445		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26446		v0.AddArg2(x, y)
26447		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26448		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26449		v2.AuxInt = int32ToAuxInt(32)
26450		v2.AddArg(y)
26451		v1.AddArg(v2)
26452		v.AddArg2(v0, v1)
26453		return true
26454	}
26455	// match: (Lsh32x32 x y)
26456	// cond: shiftIsBounded(v)
26457	// result: (SHLL x y)
26458	for {
26459		x := v_0
26460		y := v_1
26461		if !(shiftIsBounded(v)) {
26462			break
26463		}
26464		v.reset(OpAMD64SHLL)
26465		v.AddArg2(x, y)
26466		return true
26467	}
26468	return false
26469}
26470func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
26471	v_1 := v.Args[1]
26472	v_0 := v.Args[0]
26473	b := v.Block
26474	// match: (Lsh32x64 <t> x y)
26475	// cond: !shiftIsBounded(v)
26476	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
26477	for {
26478		t := v.Type
26479		x := v_0
26480		y := v_1
26481		if !(!shiftIsBounded(v)) {
26482			break
26483		}
26484		v.reset(OpAMD64ANDL)
26485		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26486		v0.AddArg2(x, y)
26487		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26488		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26489		v2.AuxInt = int32ToAuxInt(32)
26490		v2.AddArg(y)
26491		v1.AddArg(v2)
26492		v.AddArg2(v0, v1)
26493		return true
26494	}
26495	// match: (Lsh32x64 x y)
26496	// cond: shiftIsBounded(v)
26497	// result: (SHLL x y)
26498	for {
26499		x := v_0
26500		y := v_1
26501		if !(shiftIsBounded(v)) {
26502			break
26503		}
26504		v.reset(OpAMD64SHLL)
26505		v.AddArg2(x, y)
26506		return true
26507	}
26508	return false
26509}
26510func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
26511	v_1 := v.Args[1]
26512	v_0 := v.Args[0]
26513	b := v.Block
26514	// match: (Lsh32x8 <t> x y)
26515	// cond: !shiftIsBounded(v)
26516	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
26517	for {
26518		t := v.Type
26519		x := v_0
26520		y := v_1
26521		if !(!shiftIsBounded(v)) {
26522			break
26523		}
26524		v.reset(OpAMD64ANDL)
26525		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26526		v0.AddArg2(x, y)
26527		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26528		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26529		v2.AuxInt = int8ToAuxInt(32)
26530		v2.AddArg(y)
26531		v1.AddArg(v2)
26532		v.AddArg2(v0, v1)
26533		return true
26534	}
26535	// match: (Lsh32x8 x y)
26536	// cond: shiftIsBounded(v)
26537	// result: (SHLL x y)
26538	for {
26539		x := v_0
26540		y := v_1
26541		if !(shiftIsBounded(v)) {
26542			break
26543		}
26544		v.reset(OpAMD64SHLL)
26545		v.AddArg2(x, y)
26546		return true
26547	}
26548	return false
26549}
26550func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
26551	v_1 := v.Args[1]
26552	v_0 := v.Args[0]
26553	b := v.Block
26554	// match: (Lsh64x16 <t> x y)
26555	// cond: !shiftIsBounded(v)
26556	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
26557	for {
26558		t := v.Type
26559		x := v_0
26560		y := v_1
26561		if !(!shiftIsBounded(v)) {
26562			break
26563		}
26564		v.reset(OpAMD64ANDQ)
26565		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26566		v0.AddArg2(x, y)
26567		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26568		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26569		v2.AuxInt = int16ToAuxInt(64)
26570		v2.AddArg(y)
26571		v1.AddArg(v2)
26572		v.AddArg2(v0, v1)
26573		return true
26574	}
26575	// match: (Lsh64x16 x y)
26576	// cond: shiftIsBounded(v)
26577	// result: (SHLQ x y)
26578	for {
26579		x := v_0
26580		y := v_1
26581		if !(shiftIsBounded(v)) {
26582			break
26583		}
26584		v.reset(OpAMD64SHLQ)
26585		v.AddArg2(x, y)
26586		return true
26587	}
26588	return false
26589}
26590func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
26591	v_1 := v.Args[1]
26592	v_0 := v.Args[0]
26593	b := v.Block
26594	// match: (Lsh64x32 <t> x y)
26595	// cond: !shiftIsBounded(v)
26596	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
26597	for {
26598		t := v.Type
26599		x := v_0
26600		y := v_1
26601		if !(!shiftIsBounded(v)) {
26602			break
26603		}
26604		v.reset(OpAMD64ANDQ)
26605		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26606		v0.AddArg2(x, y)
26607		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26608		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26609		v2.AuxInt = int32ToAuxInt(64)
26610		v2.AddArg(y)
26611		v1.AddArg(v2)
26612		v.AddArg2(v0, v1)
26613		return true
26614	}
26615	// match: (Lsh64x32 x y)
26616	// cond: shiftIsBounded(v)
26617	// result: (SHLQ x y)
26618	for {
26619		x := v_0
26620		y := v_1
26621		if !(shiftIsBounded(v)) {
26622			break
26623		}
26624		v.reset(OpAMD64SHLQ)
26625		v.AddArg2(x, y)
26626		return true
26627	}
26628	return false
26629}
26630func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
26631	v_1 := v.Args[1]
26632	v_0 := v.Args[0]
26633	b := v.Block
26634	// match: (Lsh64x64 <t> x y)
26635	// cond: !shiftIsBounded(v)
26636	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
26637	for {
26638		t := v.Type
26639		x := v_0
26640		y := v_1
26641		if !(!shiftIsBounded(v)) {
26642			break
26643		}
26644		v.reset(OpAMD64ANDQ)
26645		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26646		v0.AddArg2(x, y)
26647		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26648		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26649		v2.AuxInt = int32ToAuxInt(64)
26650		v2.AddArg(y)
26651		v1.AddArg(v2)
26652		v.AddArg2(v0, v1)
26653		return true
26654	}
26655	// match: (Lsh64x64 x y)
26656	// cond: shiftIsBounded(v)
26657	// result: (SHLQ x y)
26658	for {
26659		x := v_0
26660		y := v_1
26661		if !(shiftIsBounded(v)) {
26662			break
26663		}
26664		v.reset(OpAMD64SHLQ)
26665		v.AddArg2(x, y)
26666		return true
26667	}
26668	return false
26669}
26670func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
26671	v_1 := v.Args[1]
26672	v_0 := v.Args[0]
26673	b := v.Block
26674	// match: (Lsh64x8 <t> x y)
26675	// cond: !shiftIsBounded(v)
26676	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
26677	for {
26678		t := v.Type
26679		x := v_0
26680		y := v_1
26681		if !(!shiftIsBounded(v)) {
26682			break
26683		}
26684		v.reset(OpAMD64ANDQ)
26685		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26686		v0.AddArg2(x, y)
26687		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26688		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26689		v2.AuxInt = int8ToAuxInt(64)
26690		v2.AddArg(y)
26691		v1.AddArg(v2)
26692		v.AddArg2(v0, v1)
26693		return true
26694	}
26695	// match: (Lsh64x8 x y)
26696	// cond: shiftIsBounded(v)
26697	// result: (SHLQ x y)
26698	for {
26699		x := v_0
26700		y := v_1
26701		if !(shiftIsBounded(v)) {
26702			break
26703		}
26704		v.reset(OpAMD64SHLQ)
26705		v.AddArg2(x, y)
26706		return true
26707	}
26708	return false
26709}
26710func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
26711	v_1 := v.Args[1]
26712	v_0 := v.Args[0]
26713	b := v.Block
26714	// match: (Lsh8x16 <t> x y)
26715	// cond: !shiftIsBounded(v)
26716	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
26717	for {
26718		t := v.Type
26719		x := v_0
26720		y := v_1
26721		if !(!shiftIsBounded(v)) {
26722			break
26723		}
26724		v.reset(OpAMD64ANDL)
26725		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26726		v0.AddArg2(x, y)
26727		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26728		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26729		v2.AuxInt = int16ToAuxInt(32)
26730		v2.AddArg(y)
26731		v1.AddArg(v2)
26732		v.AddArg2(v0, v1)
26733		return true
26734	}
26735	// match: (Lsh8x16 x y)
26736	// cond: shiftIsBounded(v)
26737	// result: (SHLL x y)
26738	for {
26739		x := v_0
26740		y := v_1
26741		if !(shiftIsBounded(v)) {
26742			break
26743		}
26744		v.reset(OpAMD64SHLL)
26745		v.AddArg2(x, y)
26746		return true
26747	}
26748	return false
26749}
26750func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
26751	v_1 := v.Args[1]
26752	v_0 := v.Args[0]
26753	b := v.Block
26754	// match: (Lsh8x32 <t> x y)
26755	// cond: !shiftIsBounded(v)
26756	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
26757	for {
26758		t := v.Type
26759		x := v_0
26760		y := v_1
26761		if !(!shiftIsBounded(v)) {
26762			break
26763		}
26764		v.reset(OpAMD64ANDL)
26765		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26766		v0.AddArg2(x, y)
26767		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26768		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26769		v2.AuxInt = int32ToAuxInt(32)
26770		v2.AddArg(y)
26771		v1.AddArg(v2)
26772		v.AddArg2(v0, v1)
26773		return true
26774	}
26775	// match: (Lsh8x32 x y)
26776	// cond: shiftIsBounded(v)
26777	// result: (SHLL x y)
26778	for {
26779		x := v_0
26780		y := v_1
26781		if !(shiftIsBounded(v)) {
26782			break
26783		}
26784		v.reset(OpAMD64SHLL)
26785		v.AddArg2(x, y)
26786		return true
26787	}
26788	return false
26789}
26790func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
26791	v_1 := v.Args[1]
26792	v_0 := v.Args[0]
26793	b := v.Block
26794	// match: (Lsh8x64 <t> x y)
26795	// cond: !shiftIsBounded(v)
26796	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
26797	for {
26798		t := v.Type
26799		x := v_0
26800		y := v_1
26801		if !(!shiftIsBounded(v)) {
26802			break
26803		}
26804		v.reset(OpAMD64ANDL)
26805		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26806		v0.AddArg2(x, y)
26807		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26808		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26809		v2.AuxInt = int32ToAuxInt(32)
26810		v2.AddArg(y)
26811		v1.AddArg(v2)
26812		v.AddArg2(v0, v1)
26813		return true
26814	}
26815	// match: (Lsh8x64 x y)
26816	// cond: shiftIsBounded(v)
26817	// result: (SHLL x y)
26818	for {
26819		x := v_0
26820		y := v_1
26821		if !(shiftIsBounded(v)) {
26822			break
26823		}
26824		v.reset(OpAMD64SHLL)
26825		v.AddArg2(x, y)
26826		return true
26827	}
26828	return false
26829}
26830func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
26831	v_1 := v.Args[1]
26832	v_0 := v.Args[0]
26833	b := v.Block
26834	// match: (Lsh8x8 <t> x y)
26835	// cond: !shiftIsBounded(v)
26836	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
26837	for {
26838		t := v.Type
26839		x := v_0
26840		y := v_1
26841		if !(!shiftIsBounded(v)) {
26842			break
26843		}
26844		v.reset(OpAMD64ANDL)
26845		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26846		v0.AddArg2(x, y)
26847		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26848		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26849		v2.AuxInt = int8ToAuxInt(32)
26850		v2.AddArg(y)
26851		v1.AddArg(v2)
26852		v.AddArg2(v0, v1)
26853		return true
26854	}
26855	// match: (Lsh8x8 x y)
26856	// cond: shiftIsBounded(v)
26857	// result: (SHLL x y)
26858	for {
26859		x := v_0
26860		y := v_1
26861		if !(shiftIsBounded(v)) {
26862			break
26863		}
26864		v.reset(OpAMD64SHLL)
26865		v.AddArg2(x, y)
26866		return true
26867	}
26868	return false
26869}
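// Float max is derived from min by negating through it: max(x, y) =
// -min(-x, -y). This reuses min's NaN and signed-zero handling, giving
// max(-0, +0) = +0 without a second fixup sequence.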
26870func rewriteValueAMD64_OpMax32F(v *Value) bool {
26871	v_1 := v.Args[1]
26872	v_0 := v.Args[0]
26873	b := v.Block
26874	// match: (Max32F <t> x y)
26875	// result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
26876	for {
26877		t := v.Type
26878		x := v_0
26879		y := v_1
26880		v.reset(OpNeg32F)
26881		v.Type = t
26882		v0 := b.NewValue0(v.Pos, OpMin32F, t)
26883		v1 := b.NewValue0(v.Pos, OpNeg32F, t)
26884		v1.AddArg(x)
26885		v2 := b.NewValue0(v.Pos, OpNeg32F, t)
26886		v2.AddArg(y)
26887		v0.AddArg2(v1, v2)
26888		v.AddArg(v0)
26889		return true
26890	}
26891}
26892func rewriteValueAMD64_OpMax64F(v *Value) bool {
26893	v_1 := v.Args[1]
26894	v_0 := v.Args[0]
26895	b := v.Block
26896	// match: (Max64F <t> x y)
26897	// result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
26898	for {
26899		t := v.Type
26900		x := v_0
26901		y := v_1
26902		v.reset(OpNeg64F)
26903		v.Type = t
26904		v0 := b.NewValue0(v.Pos, OpMin64F, t)
26905		v1 := b.NewValue0(v.Pos, OpNeg64F, t)
26906		v1.AddArg(x)
26907		v2 := b.NewValue0(v.Pos, OpNeg64F, t)
26908		v2.AddArg(y)
26909		v0.AddArg2(v1, v2)
26910		v.AddArg(v0)
26911		return true
26912	}
26913}
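// Float min cannot be a bare MINSS/MINSD: the SSE instruction returns its
// second operand both when the inputs compare equal (dropping the sign of
// -0) and when an input is NaN. The rule instead ORs MINSx(x, y) with
// MINSx(MINSx(x, y), x): the bitwise OR restores the sign bit so that
// min(-0, +0) = -0, and it keeps any NaN operand's bit pattern a NaN.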
26914func rewriteValueAMD64_OpMin32F(v *Value) bool {
26915	v_1 := v.Args[1]
26916	v_0 := v.Args[0]
26917	b := v.Block
26918	// match: (Min32F <t> x y)
26919	// result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y))
26920	for {
26921		t := v.Type
26922		x := v_0
26923		y := v_1
26924		v.reset(OpAMD64POR)
26925		v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
26926		v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
26927		v1.AddArg2(x, y)
26928		v0.AddArg2(v1, x)
26929		v.AddArg2(v0, v1)
26930		return true
26931	}
26932}
26933func rewriteValueAMD64_OpMin64F(v *Value) bool {
26934	v_1 := v.Args[1]
26935	v_0 := v.Args[0]
26936	b := v.Block
26937	// match: (Min64F <t> x y)
26938	// result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y))
26939	for {
26940		t := v.Type
26941		x := v_0
26942		y := v_1
26943		v.reset(OpAMD64POR)
26944		v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
26945		v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
26946		v1.AddArg2(x, y)
26947		v0.AddArg2(v1, x)
26948		v.AddArg2(v0, v1)
26949		return true
26950	}
26951}
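// The machine DIV ops produce a (quotient, remainder) tuple: the Div* rules
// above take Select0, the Mod* rules below take Select1 of the same op, and
// the divide's Bool AuxInt is threaded through unchanged.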
26952func rewriteValueAMD64_OpMod16(v *Value) bool {
26953	v_1 := v.Args[1]
26954	v_0 := v.Args[0]
26955	b := v.Block
26956	typ := &b.Func.Config.Types
26957	// match: (Mod16 [a] x y)
26958	// result: (Select1 (DIVW [a] x y))
26959	for {
26960		a := auxIntToBool(v.AuxInt)
26961		x := v_0
26962		y := v_1
26963		v.reset(OpSelect1)
26964		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
26965		v0.AuxInt = boolToAuxInt(a)
26966		v0.AddArg2(x, y)
26967		v.AddArg(v0)
26968		return true
26969	}
26970}
26971func rewriteValueAMD64_OpMod16u(v *Value) bool {
26972	v_1 := v.Args[1]
26973	v_0 := v.Args[0]
26974	b := v.Block
26975	typ := &b.Func.Config.Types
26976	// match: (Mod16u x y)
26977	// result: (Select1 (DIVWU x y))
26978	for {
26979		x := v_0
26980		y := v_1
26981		v.reset(OpSelect1)
26982		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
26983		v0.AddArg2(x, y)
26984		v.AddArg(v0)
26985		return true
26986	}
26987}
26988func rewriteValueAMD64_OpMod32(v *Value) bool {
26989	v_1 := v.Args[1]
26990	v_0 := v.Args[0]
26991	b := v.Block
26992	typ := &b.Func.Config.Types
26993	// match: (Mod32 [a] x y)
26994	// result: (Select1 (DIVL [a] x y))
26995	for {
26996		a := auxIntToBool(v.AuxInt)
26997		x := v_0
26998		y := v_1
26999		v.reset(OpSelect1)
27000		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
27001		v0.AuxInt = boolToAuxInt(a)
27002		v0.AddArg2(x, y)
27003		v.AddArg(v0)
27004		return true
27005	}
27006}
27007func rewriteValueAMD64_OpMod32u(v *Value) bool {
27008	v_1 := v.Args[1]
27009	v_0 := v.Args[0]
27010	b := v.Block
27011	typ := &b.Func.Config.Types
27012	// match: (Mod32u x y)
27013	// result: (Select1 (DIVLU x y))
27014	for {
27015		x := v_0
27016		y := v_1
27017		v.reset(OpSelect1)
27018		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
27019		v0.AddArg2(x, y)
27020		v.AddArg(v0)
27021		return true
27022	}
27023}
27024func rewriteValueAMD64_OpMod64(v *Value) bool {
27025	v_1 := v.Args[1]
27026	v_0 := v.Args[0]
27027	b := v.Block
27028	typ := &b.Func.Config.Types
27029	// match: (Mod64 [a] x y)
27030	// result: (Select1 (DIVQ [a] x y))
27031	for {
27032		a := auxIntToBool(v.AuxInt)
27033		x := v_0
27034		y := v_1
27035		v.reset(OpSelect1)
27036		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
27037		v0.AuxInt = boolToAuxInt(a)
27038		v0.AddArg2(x, y)
27039		v.AddArg(v0)
27040		return true
27041	}
27042}
27043func rewriteValueAMD64_OpMod64u(v *Value) bool {
27044	v_1 := v.Args[1]
27045	v_0 := v.Args[0]
27046	b := v.Block
27047	typ := &b.Func.Config.Types
27048	// match: (Mod64u x y)
27049	// result: (Select1 (DIVQU x y))
27050	for {
27051		x := v_0
27052		y := v_1
27053		v.reset(OpSelect1)
27054		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
27055		v0.AddArg2(x, y)
27056		v.AddArg(v0)
27057		return true
27058	}
27059}
27060func rewriteValueAMD64_OpMod8(v *Value) bool {
27061	v_1 := v.Args[1]
27062	v_0 := v.Args[0]
27063	b := v.Block
27064	typ := &b.Func.Config.Types
27065	// match: (Mod8 x y)
27066	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
27067	for {
27068		x := v_0
27069		y := v_1
27070		v.reset(OpSelect1)
27071		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27072		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27073		v1.AddArg(x)
27074		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27075		v2.AddArg(y)
27076		v0.AddArg2(v1, v2)
27077		v.AddArg(v0)
27078		return true
27079	}
27080}
27081func rewriteValueAMD64_OpMod8u(v *Value) bool {
27082	v_1 := v.Args[1]
27083	v_0 := v.Args[0]
27084	b := v.Block
27085	typ := &b.Func.Config.Types
27086	// match: (Mod8u x y)
27087	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
27088	for {
27089		x := v_0
27090		y := v_1
27091		v.reset(OpSelect1)
27092		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27093		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27094		v1.AddArg(x)
27095		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27096		v2.AddArg(y)
27097		v0.AddArg2(v1, v2)
27098		v.AddArg(v0)
27099		return true
27100	}
27101}
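// Move is lowered by constant size: 1/2/4/8 bytes become one load/store pair
// of that width, 16 bytes use a single SSE MOVO pair when useSSE is set (two
// MOVQ pairs otherwise), and larger sizes peel off 16-byte chunks through
// recursive Moves on offset pointers.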
27102func rewriteValueAMD64_OpMove(v *Value) bool {
27103	v_2 := v.Args[2]
27104	v_1 := v.Args[1]
27105	v_0 := v.Args[0]
27106	b := v.Block
27107	config := b.Func.Config
27108	typ := &b.Func.Config.Types
27109	// match: (Move [0] _ _ mem)
27110	// result: mem
27111	for {
27112		if auxIntToInt64(v.AuxInt) != 0 {
27113			break
27114		}
27115		mem := v_2
27116		v.copyOf(mem)
27117		return true
27118	}
27119	// match: (Move [1] dst src mem)
27120	// result: (MOVBstore dst (MOVBload src mem) mem)
27121	for {
27122		if auxIntToInt64(v.AuxInt) != 1 {
27123			break
27124		}
27125		dst := v_0
27126		src := v_1
27127		mem := v_2
27128		v.reset(OpAMD64MOVBstore)
27129		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27130		v0.AddArg2(src, mem)
27131		v.AddArg3(dst, v0, mem)
27132		return true
27133	}
27134	// match: (Move [2] dst src mem)
27135	// result: (MOVWstore dst (MOVWload src mem) mem)
27136	for {
27137		if auxIntToInt64(v.AuxInt) != 2 {
27138			break
27139		}
27140		dst := v_0
27141		src := v_1
27142		mem := v_2
27143		v.reset(OpAMD64MOVWstore)
27144		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27145		v0.AddArg2(src, mem)
27146		v.AddArg3(dst, v0, mem)
27147		return true
27148	}
27149	// match: (Move [4] dst src mem)
27150	// result: (MOVLstore dst (MOVLload src mem) mem)
27151	for {
27152		if auxIntToInt64(v.AuxInt) != 4 {
27153			break
27154		}
27155		dst := v_0
27156		src := v_1
27157		mem := v_2
27158		v.reset(OpAMD64MOVLstore)
27159		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27160		v0.AddArg2(src, mem)
27161		v.AddArg3(dst, v0, mem)
27162		return true
27163	}
27164	// match: (Move [8] dst src mem)
27165	// result: (MOVQstore dst (MOVQload src mem) mem)
27166	for {
27167		if auxIntToInt64(v.AuxInt) != 8 {
27168			break
27169		}
27170		dst := v_0
27171		src := v_1
27172		mem := v_2
27173		v.reset(OpAMD64MOVQstore)
27174		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27175		v0.AddArg2(src, mem)
27176		v.AddArg3(dst, v0, mem)
27177		return true
27178	}
27179	// match: (Move [16] dst src mem)
27180	// cond: config.useSSE
27181	// result: (MOVOstore dst (MOVOload src mem) mem)
27182	for {
27183		if auxIntToInt64(v.AuxInt) != 16 {
27184			break
27185		}
27186		dst := v_0
27187		src := v_1
27188		mem := v_2
27189		if !(config.useSSE) {
27190			break
27191		}
27192		v.reset(OpAMD64MOVOstore)
27193		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27194		v0.AddArg2(src, mem)
27195		v.AddArg3(dst, v0, mem)
27196		return true
27197	}
27198	// match: (Move [16] dst src mem)
27199	// cond: !config.useSSE
27200	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
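	// Without SSE, a 16-byte move becomes two 8-byte load/store pairs. The
	// inner MOVQstore covers bytes [0,8) and is threaded through as the
	// memory argument of the outer [8] store, preserving store order.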
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [32] dst src mem)
	// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
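	// Larger fixed sizes recurse: copy the first 16 bytes, then emit a
	// second Move at offset 16 via OffPtr on both pointers. The [48] and
	// [64] rules below decompose the same way, so each step re-enters this
	// function with a smaller size until a base case above matches.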
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [48] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [64] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(32)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(32)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(32)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [3] dst src mem)
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(2)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(2)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(3)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(3)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [9] dst src mem)
	// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [11] dst src mem)
	// result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 11 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(7)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(7)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 13 && s <= 15
	// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
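	// Sizes 13-15 use two overlapping 8-byte copies: bytes [0,8) first,
	// then bytes [s-8,s). For s=13 the second copy spans [5,13) and
	// rewrites bytes 5-7 with the same data, which is harmless and avoids
	// a byte-sized tail loop.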
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s >= 13 && s <= 15) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(int32(s - 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(int32(s - 8))
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
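	// For a ragged size whose remainder is at most 8, one 8-byte copy
	// covers the leading s%16 bytes (re-copying up to 8-s%16 bytes of
	// overlap), and the remaining multiple-of-16 length is re-dispatched
	// as a Move at offset s%16. The two rules after this one handle a
	// remainder above 8 the same way, with a 16-byte head copy.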
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = int32ToAuxInt(8)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg2(src, mem)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg2(src, mem)
		v4.AddArg3(dst, v5, mem)
		v2.AddArg3(dst, v3, v4)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
	// result: (DUFFCOPY [s] dst src mem)
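	// Mid-sized copies (64 < s <= 1024 bytes, in 16-byte units) jump into
	// the runtime's Duff's device copy routine; the byte count in AuxInt
	// determines how far into the unrolled copy sequence execution enters.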
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
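	// Very large copies (or any multiple-of-8 copy when Duff's device is
	// disabled) fall back to REP MOVSQ, with the quadword count s/8
	// materialized as a MOVQconst for CX.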
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v.AddArg4(dst, src, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg32F x)
	// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
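	// Float negation just flips the sign bit: math.Copysign(0, -1) is
	// -0.0, whose only set bit is the sign bit, so the PXOR toggles
	// exactly that bit. Unlike computing 0-x, this negates NaNs and ±0
	// correctly. Neg64F below is the 64-bit analogue.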
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
		v.AddArg2(x, v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg64F x)
	// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
		v.AddArg2(x, v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq16 x y)
	// result: (SETNE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32 x y)
	// result: (SETNE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32F x y)
	// result: (SETNEF (UCOMISS x y))
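	// Floating-point != must report true for unordered (NaN) operands.
	// UCOMISS sets the parity flag on an unordered compare, and the SETNEF
	// pseudo-op is expanded during code generation into a SETNE/SETP pair
	// whose results are OR-ed, so NaN inputs yield true.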
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64 x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64F x y)
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq8 x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqB x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqPtr x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Not x)
	// result: (XORLconst [1] x)
	for {
		x := v_0
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [int32(off)] ptr)
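	// ADDQconst immediates are limited to signed 32 bits, hence the
	// is32Bit guard; offsets outside that range fall through to the next
	// rule, which materializes the constant with MOVQconst first.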
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(off)
		v.AddArg2(v0, ptr)
		return true
	}
}
func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
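	// boundsABI groups the panic kinds by which registers carry the index
	// and length at the call site, so each LoweredPanicBounds variant
	// (A/B/C, matched in the three cases below) can use a distinct fixed
	// register pair for its arguments.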
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 x)
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
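	// POPCNT has no 8-bit form, and the 16-bit form offers no advantage
	// over the 32-bit one, so narrow inputs are zero-extended (MOVWQZX
	// here, MOVBQZX in PopCount8 below) and counted with POPCNTL;
	// zero-extension cannot add set bits, so the count is unchanged.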
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount8 x)
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
	v_0 := v.Args[0]
	// match: (RoundToEven x)
	// result: (ROUNDSD [0] x)
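	// ROUNDSD's immediate selects the rounding mode: 0 is round to
	// nearest even, which is exactly this op; Floor, Ceil, and Trunc use
	// immediates 1, 2, and 3 respectively.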
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
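	// Go requires an unsigned shift to produce 0 once the count reaches
	// the operand width, but the hardware masks the shift count. CMPWconst
	// y [16] sets the carry flag iff uint16(y) < 16, SBBLcarrymask turns
	// that flag into an all-ones (in range) or all-zeros (out of range)
	// mask, and the ANDL applies it:
	//   result = (x >> y) & (y < 16 ? -1 : 0)
	// The same pattern recurs below for every width/count combination;
	// when shiftIsBounded proves y is in range, the bare shift suffices.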
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
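	// Signed shifts saturate the count instead of masking the result: when
	// y is out of range the SBBLcarrymask is 0, its NOTL is all ones, and
	// the ORL forces the count to all ones, so SARW shifts by the (masked)
	// maximum and yields pure sign fill, i.e.
	//   count = y | (y < 16 ? 0 : -1)
	// which matches Go's 0/-1 result for oversized signed shifts.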
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
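	// The 64-bit variants use the Q-form ops (SHRQ/ANDQ/SBBQcarrymask)
	// because the mask itself must be 64 bits wide; otherwise the logic is
	// the same as for the narrower widths above.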
28657	for {
28658		t := v.Type
28659		x := v_0
28660		y := v_1
28661		if !(!shiftIsBounded(v)) {
28662			break
28663		}
28664		v.reset(OpAMD64ANDQ)
28665		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28666		v0.AddArg2(x, y)
28667		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28668		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28669		v2.AuxInt = int32ToAuxInt(64)
28670		v2.AddArg(y)
28671		v1.AddArg(v2)
28672		v.AddArg2(v0, v1)
28673		return true
28674	}
28675	// match: (Rsh64Ux64 x y)
28676	// cond: shiftIsBounded(v)
28677	// result: (SHRQ x y)
28678	for {
28679		x := v_0
28680		y := v_1
28681		if !(shiftIsBounded(v)) {
28682			break
28683		}
28684		v.reset(OpAMD64SHRQ)
28685		v.AddArg2(x, y)
28686		return true
28687	}
28688	return false
28689}
28690func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
28691	v_1 := v.Args[1]
28692	v_0 := v.Args[0]
28693	b := v.Block
28694	// match: (Rsh64Ux8 <t> x y)
28695	// cond: !shiftIsBounded(v)
28696	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
28697	for {
28698		t := v.Type
28699		x := v_0
28700		y := v_1
28701		if !(!shiftIsBounded(v)) {
28702			break
28703		}
28704		v.reset(OpAMD64ANDQ)
28705		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28706		v0.AddArg2(x, y)
28707		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28708		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28709		v2.AuxInt = int8ToAuxInt(64)
28710		v2.AddArg(y)
28711		v1.AddArg(v2)
28712		v.AddArg2(v0, v1)
28713		return true
28714	}
28715	// match: (Rsh64Ux8 x y)
28716	// cond: shiftIsBounded(v)
28717	// result: (SHRQ x y)
28718	for {
28719		x := v_0
28720		y := v_1
28721		if !(shiftIsBounded(v)) {
28722			break
28723		}
28724		v.reset(OpAMD64SHRQ)
28725		v.AddArg2(x, y)
28726		return true
28727	}
28728	return false
28729}
28730func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
28731	v_1 := v.Args[1]
28732	v_0 := v.Args[0]
28733	b := v.Block
28734	// match: (Rsh64x16 <t> x y)
28735	// cond: !shiftIsBounded(v)
28736	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
28737	for {
28738		t := v.Type
28739		x := v_0
28740		y := v_1
28741		if !(!shiftIsBounded(v)) {
28742			break
28743		}
28744		v.reset(OpAMD64SARQ)
28745		v.Type = t
28746		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28747		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28748		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28749		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28750		v3.AuxInt = int16ToAuxInt(64)
28751		v3.AddArg(y)
28752		v2.AddArg(v3)
28753		v1.AddArg(v2)
28754		v0.AddArg2(y, v1)
28755		v.AddArg2(x, v0)
28756		return true
28757	}
28758	// match: (Rsh64x16 x y)
28759	// cond: shiftIsBounded(v)
28760	// result: (SARQ x y)
28761	for {
28762		x := v_0
28763		y := v_1
28764		if !(shiftIsBounded(v)) {
28765			break
28766		}
28767		v.reset(OpAMD64SARQ)
28768		v.AddArg2(x, y)
28769		return true
28770	}
28771	return false
28772}
28773func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
28774	v_1 := v.Args[1]
28775	v_0 := v.Args[0]
28776	b := v.Block
28777	// match: (Rsh64x32 <t> x y)
28778	// cond: !shiftIsBounded(v)
28779	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
28780	for {
28781		t := v.Type
28782		x := v_0
28783		y := v_1
28784		if !(!shiftIsBounded(v)) {
28785			break
28786		}
28787		v.reset(OpAMD64SARQ)
28788		v.Type = t
28789		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28790		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28791		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28792		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28793		v3.AuxInt = int32ToAuxInt(64)
28794		v3.AddArg(y)
28795		v2.AddArg(v3)
28796		v1.AddArg(v2)
28797		v0.AddArg2(y, v1)
28798		v.AddArg2(x, v0)
28799		return true
28800	}
28801	// match: (Rsh64x32 x y)
28802	// cond: shiftIsBounded(v)
28803	// result: (SARQ x y)
28804	for {
28805		x := v_0
28806		y := v_1
28807		if !(shiftIsBounded(v)) {
28808			break
28809		}
28810		v.reset(OpAMD64SARQ)
28811		v.AddArg2(x, y)
28812		return true
28813	}
28814	return false
28815}
28816func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
28817	v_1 := v.Args[1]
28818	v_0 := v.Args[0]
28819	b := v.Block
28820	// match: (Rsh64x64 <t> x y)
28821	// cond: !shiftIsBounded(v)
28822	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
28823	for {
28824		t := v.Type
28825		x := v_0
28826		y := v_1
28827		if !(!shiftIsBounded(v)) {
28828			break
28829		}
28830		v.reset(OpAMD64SARQ)
28831		v.Type = t
28832		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28833		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28834		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28835		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28836		v3.AuxInt = int32ToAuxInt(64)
28837		v3.AddArg(y)
28838		v2.AddArg(v3)
28839		v1.AddArg(v2)
28840		v0.AddArg2(y, v1)
28841		v.AddArg2(x, v0)
28842		return true
28843	}
28844	// match: (Rsh64x64 x y)
28845	// cond: shiftIsBounded(v)
28846	// result: (SARQ x y)
28847	for {
28848		x := v_0
28849		y := v_1
28850		if !(shiftIsBounded(v)) {
28851			break
28852		}
28853		v.reset(OpAMD64SARQ)
28854		v.AddArg2(x, y)
28855		return true
28856	}
28857	return false
28858}
28859func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
28860	v_1 := v.Args[1]
28861	v_0 := v.Args[0]
28862	b := v.Block
28863	// match: (Rsh64x8 <t> x y)
28864	// cond: !shiftIsBounded(v)
28865	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
28866	for {
28867		t := v.Type
28868		x := v_0
28869		y := v_1
28870		if !(!shiftIsBounded(v)) {
28871			break
28872		}
28873		v.reset(OpAMD64SARQ)
28874		v.Type = t
28875		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28876		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28877		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28878		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28879		v3.AuxInt = int8ToAuxInt(64)
28880		v3.AddArg(y)
28881		v2.AddArg(v3)
28882		v1.AddArg(v2)
28883		v0.AddArg2(y, v1)
28884		v.AddArg2(x, v0)
28885		return true
28886	}
28887	// match: (Rsh64x8 x y)
28888	// cond: shiftIsBounded(v)
28889	// result: (SARQ x y)
28890	for {
28891		x := v_0
28892		y := v_1
28893		if !(shiftIsBounded(v)) {
28894			break
28895		}
28896		v.reset(OpAMD64SARQ)
28897		v.AddArg2(x, y)
28898		return true
28899	}
28900	return false
28901}
28902func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
28903	v_1 := v.Args[1]
28904	v_0 := v.Args[0]
28905	b := v.Block
28906	// match: (Rsh8Ux16 <t> x y)
28907	// cond: !shiftIsBounded(v)
28908	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
28909	for {
28910		t := v.Type
28911		x := v_0
28912		y := v_1
28913		if !(!shiftIsBounded(v)) {
28914			break
28915		}
28916		v.reset(OpAMD64ANDL)
28917		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28918		v0.AddArg2(x, y)
28919		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28920		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28921		v2.AuxInt = int16ToAuxInt(8)
28922		v2.AddArg(y)
28923		v1.AddArg(v2)
28924		v.AddArg2(v0, v1)
28925		return true
28926	}
28927	// match: (Rsh8Ux16 x y)
28928	// cond: shiftIsBounded(v)
28929	// result: (SHRB x y)
28930	for {
28931		x := v_0
28932		y := v_1
28933		if !(shiftIsBounded(v)) {
28934			break
28935		}
28936		v.reset(OpAMD64SHRB)
28937		v.AddArg2(x, y)
28938		return true
28939	}
28940	return false
28941}
28942func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
28943	v_1 := v.Args[1]
28944	v_0 := v.Args[0]
28945	b := v.Block
28946	// match: (Rsh8Ux32 <t> x y)
28947	// cond: !shiftIsBounded(v)
28948	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
28949	for {
28950		t := v.Type
28951		x := v_0
28952		y := v_1
28953		if !(!shiftIsBounded(v)) {
28954			break
28955		}
28956		v.reset(OpAMD64ANDL)
28957		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28958		v0.AddArg2(x, y)
28959		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28960		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28961		v2.AuxInt = int32ToAuxInt(8)
28962		v2.AddArg(y)
28963		v1.AddArg(v2)
28964		v.AddArg2(v0, v1)
28965		return true
28966	}
28967	// match: (Rsh8Ux32 x y)
28968	// cond: shiftIsBounded(v)
28969	// result: (SHRB x y)
28970	for {
28971		x := v_0
28972		y := v_1
28973		if !(shiftIsBounded(v)) {
28974			break
28975		}
28976		v.reset(OpAMD64SHRB)
28977		v.AddArg2(x, y)
28978		return true
28979	}
28980	return false
28981}
28982func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
28983	v_1 := v.Args[1]
28984	v_0 := v.Args[0]
28985	b := v.Block
28986	// match: (Rsh8Ux64 <t> x y)
28987	// cond: !shiftIsBounded(v)
28988	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
28989	for {
28990		t := v.Type
28991		x := v_0
28992		y := v_1
28993		if !(!shiftIsBounded(v)) {
28994			break
28995		}
28996		v.reset(OpAMD64ANDL)
28997		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28998		v0.AddArg2(x, y)
28999		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29000		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29001		v2.AuxInt = int32ToAuxInt(8)
29002		v2.AddArg(y)
29003		v1.AddArg(v2)
29004		v.AddArg2(v0, v1)
29005		return true
29006	}
29007	// match: (Rsh8Ux64 x y)
29008	// cond: shiftIsBounded(v)
29009	// result: (SHRB x y)
29010	for {
29011		x := v_0
29012		y := v_1
29013		if !(shiftIsBounded(v)) {
29014			break
29015		}
29016		v.reset(OpAMD64SHRB)
29017		v.AddArg2(x, y)
29018		return true
29019	}
29020	return false
29021}
29022func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
29023	v_1 := v.Args[1]
29024	v_0 := v.Args[0]
29025	b := v.Block
29026	// match: (Rsh8Ux8 <t> x y)
29027	// cond: !shiftIsBounded(v)
29028	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
29029	for {
29030		t := v.Type
29031		x := v_0
29032		y := v_1
29033		if !(!shiftIsBounded(v)) {
29034			break
29035		}
29036		v.reset(OpAMD64ANDL)
29037		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29038		v0.AddArg2(x, y)
29039		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29040		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29041		v2.AuxInt = int8ToAuxInt(8)
29042		v2.AddArg(y)
29043		v1.AddArg(v2)
29044		v.AddArg2(v0, v1)
29045		return true
29046	}
29047	// match: (Rsh8Ux8 x y)
29048	// cond: shiftIsBounded(v)
29049	// result: (SHRB x y)
29050	for {
29051		x := v_0
29052		y := v_1
29053		if !(shiftIsBounded(v)) {
29054			break
29055		}
29056		v.reset(OpAMD64SHRB)
29057		v.AddArg2(x, y)
29058		return true
29059	}
29060	return false
29061}
29062func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
29063	v_1 := v.Args[1]
29064	v_0 := v.Args[0]
29065	b := v.Block
29066	// match: (Rsh8x16 <t> x y)
29067	// cond: !shiftIsBounded(v)
29068	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
29069	for {
29070		t := v.Type
29071		x := v_0
29072		y := v_1
29073		if !(!shiftIsBounded(v)) {
29074			break
29075		}
29076		v.reset(OpAMD64SARB)
29077		v.Type = t
29078		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29079		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29080		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29081		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
29082		v3.AuxInt = int16ToAuxInt(8)
29083		v3.AddArg(y)
29084		v2.AddArg(v3)
29085		v1.AddArg(v2)
29086		v0.AddArg2(y, v1)
29087		v.AddArg2(x, v0)
29088		return true
29089	}
29090	// match: (Rsh8x16 x y)
29091	// cond: shiftIsBounded(v)
29092	// result: (SARB x y)
29093	for {
29094		x := v_0
29095		y := v_1
29096		if !(shiftIsBounded(v)) {
29097			break
29098		}
29099		v.reset(OpAMD64SARB)
29100		v.AddArg2(x, y)
29101		return true
29102	}
29103	return false
29104}
29105func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
29106	v_1 := v.Args[1]
29107	v_0 := v.Args[0]
29108	b := v.Block
29109	// match: (Rsh8x32 <t> x y)
29110	// cond: !shiftIsBounded(v)
29111	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
29112	for {
29113		t := v.Type
29114		x := v_0
29115		y := v_1
29116		if !(!shiftIsBounded(v)) {
29117			break
29118		}
29119		v.reset(OpAMD64SARB)
29120		v.Type = t
29121		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29122		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29123		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29124		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
29125		v3.AuxInt = int32ToAuxInt(8)
29126		v3.AddArg(y)
29127		v2.AddArg(v3)
29128		v1.AddArg(v2)
29129		v0.AddArg2(y, v1)
29130		v.AddArg2(x, v0)
29131		return true
29132	}
29133	// match: (Rsh8x32 x y)
29134	// cond: shiftIsBounded(v)
29135	// result: (SARB x y)
29136	for {
29137		x := v_0
29138		y := v_1
29139		if !(shiftIsBounded(v)) {
29140			break
29141		}
29142		v.reset(OpAMD64SARB)
29143		v.AddArg2(x, y)
29144		return true
29145	}
29146	return false
29147}
29148func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
29149	v_1 := v.Args[1]
29150	v_0 := v.Args[0]
29151	b := v.Block
29152	// match: (Rsh8x64 <t> x y)
29153	// cond: !shiftIsBounded(v)
29154	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
29155	for {
29156		t := v.Type
29157		x := v_0
29158		y := v_1
29159		if !(!shiftIsBounded(v)) {
29160			break
29161		}
29162		v.reset(OpAMD64SARB)
29163		v.Type = t
29164		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
29165		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
29166		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
29167		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29168		v3.AuxInt = int32ToAuxInt(8)
29169		v3.AddArg(y)
29170		v2.AddArg(v3)
29171		v1.AddArg(v2)
29172		v0.AddArg2(y, v1)
29173		v.AddArg2(x, v0)
29174		return true
29175	}
29176	// match: (Rsh8x64 x y)
29177	// cond: shiftIsBounded(v)
29178	// result: (SARB x y)
29179	for {
29180		x := v_0
29181		y := v_1
29182		if !(shiftIsBounded(v)) {
29183			break
29184		}
29185		v.reset(OpAMD64SARB)
29186		v.AddArg2(x, y)
29187		return true
29188	}
29189	return false
29190}
29191func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
29192	v_1 := v.Args[1]
29193	v_0 := v.Args[0]
29194	b := v.Block
29195	// match: (Rsh8x8 <t> x y)
29196	// cond: !shiftIsBounded(v)
29197	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
29198	for {
29199		t := v.Type
29200		x := v_0
29201		y := v_1
29202		if !(!shiftIsBounded(v)) {
29203			break
29204		}
29205		v.reset(OpAMD64SARB)
29206		v.Type = t
29207		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29208		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29209		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29210		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29211		v3.AuxInt = int8ToAuxInt(8)
29212		v3.AddArg(y)
29213		v2.AddArg(v3)
29214		v1.AddArg(v2)
29215		v0.AddArg2(y, v1)
29216		v.AddArg2(x, v0)
29217		return true
29218	}
29219	// match: (Rsh8x8 x y)
29220	// cond: shiftIsBounded(v)
29221	// result: (SARB x y)
29222	for {
29223		x := v_0
29224		y := v_1
29225		if !(shiftIsBounded(v)) {
29226			break
29227		}
29228		v.reset(OpAMD64SARB)
29229		v.AddArg2(x, y)
29230		return true
29231	}
29232	return false
29233}
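// Select0/Select1 project the (value, flags-or-carry) halves of
// tuple-producing generic ops onto concrete AMD64 instructions with the same
// tuple shape: MULQU/MULLU for the overflow-checked multiplies and ADCQ/SBBQ
// for 64-bit add-with-carry and subtract-with-borrow. The generic carry
// input c is a 0/1 value, so it is converted into the CPU carry flag via
// NEGLflags: NEG sets the carry flag exactly when its operand is nonzero,
// making Select1 of (NEGLflags c) the flags state "CF = c".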
29234func rewriteValueAMD64_OpSelect0(v *Value) bool {
29235	v_0 := v.Args[0]
29236	b := v.Block
29237	typ := &b.Func.Config.Types
29238	// match: (Select0 (Mul64uover x y))
29239	// result: (Select0 <typ.UInt64> (MULQU x y))
29240	for {
29241		if v_0.Op != OpMul64uover {
29242			break
29243		}
29244		y := v_0.Args[1]
29245		x := v_0.Args[0]
29246		v.reset(OpSelect0)
29247		v.Type = typ.UInt64
29248		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29249		v0.AddArg2(x, y)
29250		v.AddArg(v0)
29251		return true
29252	}
29253	// match: (Select0 (Mul32uover x y))
29254	// result: (Select0 <typ.UInt32> (MULLU x y))
29255	for {
29256		if v_0.Op != OpMul32uover {
29257			break
29258		}
29259		y := v_0.Args[1]
29260		x := v_0.Args[0]
29261		v.reset(OpSelect0)
29262		v.Type = typ.UInt32
29263		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29264		v0.AddArg2(x, y)
29265		v.AddArg(v0)
29266		return true
29267	}
29268	// match: (Select0 (Add64carry x y c))
29269	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
29270	for {
29271		if v_0.Op != OpAdd64carry {
29272			break
29273		}
29274		c := v_0.Args[2]
29275		x := v_0.Args[0]
29276		y := v_0.Args[1]
29277		v.reset(OpSelect0)
29278		v.Type = typ.UInt64
29279		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29280		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29281		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29282		v2.AddArg(c)
29283		v1.AddArg(v2)
29284		v0.AddArg3(x, y, v1)
29285		v.AddArg(v0)
29286		return true
29287	}
29288	// match: (Select0 (Sub64borrow x y c))
29289	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
29290	for {
29291		if v_0.Op != OpSub64borrow {
29292			break
29293		}
29294		c := v_0.Args[2]
29295		x := v_0.Args[0]
29296		y := v_0.Args[1]
29297		v.reset(OpSelect0)
29298		v.Type = typ.UInt64
29299		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29300		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29301		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29302		v2.AddArg(c)
29303		v1.AddArg(v2)
29304		v0.AddArg3(x, y, v1)
29305		v.AddArg(v0)
29306		return true
29307	}
29308	// match: (Select0 <t> (AddTupleFirst32 val tuple))
29309	// result: (ADDL val (Select0 <t> tuple))
29310	for {
29311		t := v.Type
29312		if v_0.Op != OpAMD64AddTupleFirst32 {
29313			break
29314		}
29315		tuple := v_0.Args[1]
29316		val := v_0.Args[0]
29317		v.reset(OpAMD64ADDL)
29318		v0 := b.NewValue0(v.Pos, OpSelect0, t)
29319		v0.AddArg(tuple)
29320		v.AddArg2(val, v0)
29321		return true
29322	}
29323	// match: (Select0 <t> (AddTupleFirst64 val tuple))
29324	// result: (ADDQ val (Select0 <t> tuple))
29325	for {
29326		t := v.Type
29327		if v_0.Op != OpAMD64AddTupleFirst64 {
29328			break
29329		}
29330		tuple := v_0.Args[1]
29331		val := v_0.Args[0]
29332		v.reset(OpAMD64ADDQ)
29333		v0 := b.NewValue0(v.Pos, OpSelect0, t)
29334		v0.AddArg(tuple)
29335		v.AddArg2(val, v0)
29336		return true
29337	}
29338	return false
29339}
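// This is the carry-out direction of the conversion: after ADCQ/SBBQ, the
// carry flag is lowered back to a 0/1 value by SBBQcarrymask (flags -> 0 or
// -1) followed by NEGQ (-1 -> 1). The
// (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) rule further down
// recognizes a value that went through this flags -> 0/1 -> flags round
// trip and cancels it back to x.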
29340func rewriteValueAMD64_OpSelect1(v *Value) bool {
29341	v_0 := v.Args[0]
29342	b := v.Block
29343	typ := &b.Func.Config.Types
29344	// match: (Select1 (Mul64uover x y))
29345	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
29346	for {
29347		if v_0.Op != OpMul64uover {
29348			break
29349		}
29350		y := v_0.Args[1]
29351		x := v_0.Args[0]
29352		v.reset(OpAMD64SETO)
29353		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29354		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29355		v1.AddArg2(x, y)
29356		v0.AddArg(v1)
29357		v.AddArg(v0)
29358		return true
29359	}
29360	// match: (Select1 (Mul32uover x y))
29361	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
29362	for {
29363		if v_0.Op != OpMul32uover {
29364			break
29365		}
29366		y := v_0.Args[1]
29367		x := v_0.Args[0]
29368		v.reset(OpAMD64SETO)
29369		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29370		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29371		v1.AddArg2(x, y)
29372		v0.AddArg(v1)
29373		v.AddArg(v0)
29374		return true
29375	}
29376	// match: (Select1 (Add64carry x y c))
29377	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
29378	for {
29379		if v_0.Op != OpAdd64carry {
29380			break
29381		}
29382		c := v_0.Args[2]
29383		x := v_0.Args[0]
29384		y := v_0.Args[1]
29385		v.reset(OpAMD64NEGQ)
29386		v.Type = typ.UInt64
29387		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29388		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29389		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29390		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29391		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29392		v4.AddArg(c)
29393		v3.AddArg(v4)
29394		v2.AddArg3(x, y, v3)
29395		v1.AddArg(v2)
29396		v0.AddArg(v1)
29397		v.AddArg(v0)
29398		return true
29399	}
29400	// match: (Select1 (Sub64borrow x y c))
29401	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
29402	for {
29403		if v_0.Op != OpSub64borrow {
29404			break
29405		}
29406		c := v_0.Args[2]
29407		x := v_0.Args[0]
29408		y := v_0.Args[1]
29409		v.reset(OpAMD64NEGQ)
29410		v.Type = typ.UInt64
29411		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29412		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29413		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29414		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29415		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29416		v4.AddArg(c)
29417		v3.AddArg(v4)
29418		v2.AddArg3(x, y, v3)
29419		v1.AddArg(v2)
29420		v0.AddArg(v1)
29421		v.AddArg(v0)
29422		return true
29423	}
29424	// match: (Select1 (NEGLflags (MOVQconst [0])))
29425	// result: (FlagEQ)
29426	for {
29427		if v_0.Op != OpAMD64NEGLflags {
29428			break
29429		}
29430		v_0_0 := v_0.Args[0]
29431		if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
29432			break
29433		}
29434		v.reset(OpAMD64FlagEQ)
29435		return true
29436	}
29437	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
29438	// result: x
29439	for {
29440		if v_0.Op != OpAMD64NEGLflags {
29441			break
29442		}
29443		v_0_0 := v_0.Args[0]
29444		if v_0_0.Op != OpAMD64NEGQ {
29445			break
29446		}
29447		v_0_0_0 := v_0_0.Args[0]
29448		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
29449			break
29450		}
29451		x := v_0_0_0.Args[0]
29452		v.copyOf(x)
29453		return true
29454	}
29455	// match: (Select1 (AddTupleFirst32 _ tuple))
29456	// result: (Select1 tuple)
29457	for {
29458		if v_0.Op != OpAMD64AddTupleFirst32 {
29459			break
29460		}
29461		tuple := v_0.Args[1]
29462		v.reset(OpSelect1)
29463		v.AddArg(tuple)
29464		return true
29465	}
29466	// match: (Select1 (AddTupleFirst64 _ tuple))
29467	// result: (Select1 tuple)
29468	for {
29469		if v_0.Op != OpAMD64AddTupleFirst64 {
29470			break
29471		}
29472		tuple := v_0.Args[1]
29473		v.reset(OpSelect1)
29474		v.AddArg(tuple)
29475		return true
29476	}
29477	return false
29478}
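// SelectN here inlines small runtime.memmove calls into a plain Move. The
// rewrite only fires when the length is a known non-negative constant,
// every value being clobbered has no other uses, and isInlinableMemmove
// accepts the size for this configuration. The two patterns appear to cover
// the two ways arguments can reach the call: spilled to the stack through
// the MOVQstore chain, or passed directly as call arguments.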
29479func rewriteValueAMD64_OpSelectN(v *Value) bool {
29480	v_0 := v.Args[0]
29481	b := v.Block
29482	config := b.Func.Config
29483	// match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
29484	// cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
29485	// result: (Move [sc.Val64()] dst src mem)
29486	for {
29487		if auxIntToInt64(v.AuxInt) != 0 {
29488			break
29489		}
29490		call := v_0
29491		if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
29492			break
29493		}
29494		sym := auxToCall(call.Aux)
29495		s1 := call.Args[0]
29496		if s1.Op != OpAMD64MOVQstoreconst {
29497			break
29498		}
29499		sc := auxIntToValAndOff(s1.AuxInt)
29500		_ = s1.Args[1]
29501		s2 := s1.Args[1]
29502		if s2.Op != OpAMD64MOVQstore {
29503			break
29504		}
29505		_ = s2.Args[2]
29506		src := s2.Args[1]
29507		s3 := s2.Args[2]
29508		if s3.Op != OpAMD64MOVQstore {
29509			break
29510		}
29511		mem := s3.Args[2]
29512		dst := s3.Args[1]
29513		if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
29514			break
29515		}
29516		v.reset(OpMove)
29517		v.AuxInt = int64ToAuxInt(sc.Val64())
29518		v.AddArg3(dst, src, mem)
29519		return true
29520	}
29521	// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
29522	// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
29523	// result: (Move [sz] dst src mem)
29524	for {
29525		if auxIntToInt64(v.AuxInt) != 0 {
29526			break
29527		}
29528		call := v_0
29529		if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
29530			break
29531		}
29532		sym := auxToCall(call.Aux)
29533		mem := call.Args[3]
29534		dst := call.Args[0]
29535		src := call.Args[1]
29536		call_2 := call.Args[2]
29537		if call_2.Op != OpAMD64MOVQconst {
29538			break
29539		}
29540		sz := auxIntToInt64(call_2.AuxInt)
29541		if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
29542			break
29543		}
29544		v.reset(OpMove)
29545		v.AuxInt = int64ToAuxInt(sz)
29546		v.AddArg3(dst, src, mem)
29547		return true
29548	}
29549	return false
29550}
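// Slicemask(x) must be 0 when x == 0 and all ones for any positive length.
// NEGQ makes every nonzero input negative, so its sign bit is set exactly
// when x != 0, and SARQconst [63] smears that sign bit across the word.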
29551func rewriteValueAMD64_OpSlicemask(v *Value) bool {
29552	v_0 := v.Args[0]
29553	b := v.Block
29554	// match: (Slicemask <t> x)
29555	// result: (SARQconst (NEGQ <t> x) [63])
29556	for {
29557		t := v.Type
29558		x := v_0
29559		v.reset(OpAMD64SARQconst)
29560		v.AuxInt = int8ToAuxInt(63)
29561		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
29562		v0.AddArg(x)
29563		v.AddArg(v0)
29564		return true
29565	}
29566}
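// SpectreIndex/SpectreSliceIndex mask a bounds-checked index so that even a
// misspeculated out-of-range access reads from address 0 rather than an
// attacker-chosen location: the CMOV keeps x when the unsigned compare
// against the bound succeeds and substitutes the constant 0 otherwise.
// SpectreSliceIndex uses CMOVQHI (strictly above) because a slice index may
// legitimately equal the bound.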
29567func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
29568	v_1 := v.Args[1]
29569	v_0 := v.Args[0]
29570	b := v.Block
29571	typ := &b.Func.Config.Types
29572	// match: (SpectreIndex <t> x y)
29573	// result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
29574	for {
29575		x := v_0
29576		y := v_1
29577		v.reset(OpAMD64CMOVQCC)
29578		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29579		v0.AuxInt = int64ToAuxInt(0)
29580		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29581		v1.AddArg2(x, y)
29582		v.AddArg3(x, v0, v1)
29583		return true
29584	}
29585}
29586func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
29587	v_1 := v.Args[1]
29588	v_0 := v.Args[0]
29589	b := v.Block
29590	typ := &b.Func.Config.Types
29591	// match: (SpectreSliceIndex <t> x y)
29592	// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
29593	for {
29594		x := v_0
29595		y := v_1
29596		v.reset(OpAMD64CMOVQHI)
29597		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29598		v0.AuxInt = int64ToAuxInt(0)
29599		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29600		v1.AddArg2(x, y)
29601		v.AddArg3(x, v0, v1)
29602		return true
29603	}
29604}
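// Store dispatches on the type's size and class alone. The floating-point
// cases are tested first because an 8- or 4-byte size by itself does not
// determine the register class; integer and pointer stores then select the
// MOV[QLWB]store of matching width.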
29605func rewriteValueAMD64_OpStore(v *Value) bool {
29606	v_2 := v.Args[2]
29607	v_1 := v.Args[1]
29608	v_0 := v.Args[0]
29609	// match: (Store {t} ptr val mem)
29610	// cond: t.Size() == 8 && t.IsFloat()
29611	// result: (MOVSDstore ptr val mem)
29612	for {
29613		t := auxToType(v.Aux)
29614		ptr := v_0
29615		val := v_1
29616		mem := v_2
29617		if !(t.Size() == 8 && t.IsFloat()) {
29618			break
29619		}
29620		v.reset(OpAMD64MOVSDstore)
29621		v.AddArg3(ptr, val, mem)
29622		return true
29623	}
29624	// match: (Store {t} ptr val mem)
29625	// cond: t.Size() == 4 && t.IsFloat()
29626	// result: (MOVSSstore ptr val mem)
29627	for {
29628		t := auxToType(v.Aux)
29629		ptr := v_0
29630		val := v_1
29631		mem := v_2
29632		if !(t.Size() == 4 && t.IsFloat()) {
29633			break
29634		}
29635		v.reset(OpAMD64MOVSSstore)
29636		v.AddArg3(ptr, val, mem)
29637		return true
29638	}
29639	// match: (Store {t} ptr val mem)
29640	// cond: t.Size() == 8 && !t.IsFloat()
29641	// result: (MOVQstore ptr val mem)
29642	for {
29643		t := auxToType(v.Aux)
29644		ptr := v_0
29645		val := v_1
29646		mem := v_2
29647		if !(t.Size() == 8 && !t.IsFloat()) {
29648			break
29649		}
29650		v.reset(OpAMD64MOVQstore)
29651		v.AddArg3(ptr, val, mem)
29652		return true
29653	}
29654	// match: (Store {t} ptr val mem)
29655	// cond: t.Size() == 4 && !t.IsFloat()
29656	// result: (MOVLstore ptr val mem)
29657	for {
29658		t := auxToType(v.Aux)
29659		ptr := v_0
29660		val := v_1
29661		mem := v_2
29662		if !(t.Size() == 4 && !t.IsFloat()) {
29663			break
29664		}
29665		v.reset(OpAMD64MOVLstore)
29666		v.AddArg3(ptr, val, mem)
29667		return true
29668	}
29669	// match: (Store {t} ptr val mem)
29670	// cond: t.Size() == 2
29671	// result: (MOVWstore ptr val mem)
29672	for {
29673		t := auxToType(v.Aux)
29674		ptr := v_0
29675		val := v_1
29676		mem := v_2
29677		if !(t.Size() == 2) {
29678			break
29679		}
29680		v.reset(OpAMD64MOVWstore)
29681		v.AddArg3(ptr, val, mem)
29682		return true
29683	}
29684	// match: (Store {t} ptr val mem)
29685	// cond: t.Size() == 1
29686	// result: (MOVBstore ptr val mem)
29687	for {
29688		t := auxToType(v.Aux)
29689		ptr := v_0
29690		val := v_1
29691		mem := v_2
29692		if !(t.Size() == 1) {
29693			break
29694		}
29695		v.reset(OpAMD64MOVBstore)
29696		v.AddArg3(ptr, val, mem)
29697		return true
29698	}
29699	return false
29700}
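// ROUNDSD's immediate selects the SSE4.1 rounding mode; 3 means truncate
// (round toward zero). The sibling immediates 0/1/2 select round-to-even,
// floor, and ceil for the corresponding ops.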
29701func rewriteValueAMD64_OpTrunc(v *Value) bool {
29702	v_0 := v.Args[0]
29703	// match: (Trunc x)
29704	// result: (ROUNDSD [3] x)
29705	for {
29706		x := v_0
29707		v.reset(OpAMD64ROUNDSD)
29708		v.AuxInt = int8ToAuxInt(3)
29709		v.AddArg(x)
29710		return true
29711	}
29712}
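// Zero picks a strategy by size and configuration, roughly:
//   - sizes up to 8, and small odd sizes, use one or two possibly
//     overlapping store-constant instructions (e.g. Zero [7] is two 4-byte
//     stores at offsets 0 and 3);
//   - with SSE, 16-byte MOVO stores cover 16/32/48/64 bytes directly; a
//     size that is not a multiple of 16 first gets one store at offset 0
//     and then recurses on the remaining 16-byte multiple at destptr+s%16;
//   - 64 < s <= 1024 with s%16 == 0 uses DUFFZERO (unless disabled);
//   - remaining large or awkward sizes fall back to REP STOSQ.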
29713func rewriteValueAMD64_OpZero(v *Value) bool {
29714	v_1 := v.Args[1]
29715	v_0 := v.Args[0]
29716	b := v.Block
29717	config := b.Func.Config
29718	typ := &b.Func.Config.Types
29719	// match: (Zero [0] _ mem)
29720	// result: mem
29721	for {
29722		if auxIntToInt64(v.AuxInt) != 0 {
29723			break
29724		}
29725		mem := v_1
29726		v.copyOf(mem)
29727		return true
29728	}
29729	// match: (Zero [1] destptr mem)
29730	// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
29731	for {
29732		if auxIntToInt64(v.AuxInt) != 1 {
29733			break
29734		}
29735		destptr := v_0
29736		mem := v_1
29737		v.reset(OpAMD64MOVBstoreconst)
29738		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29739		v.AddArg2(destptr, mem)
29740		return true
29741	}
29742	// match: (Zero [2] destptr mem)
29743	// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
29744	for {
29745		if auxIntToInt64(v.AuxInt) != 2 {
29746			break
29747		}
29748		destptr := v_0
29749		mem := v_1
29750		v.reset(OpAMD64MOVWstoreconst)
29751		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29752		v.AddArg2(destptr, mem)
29753		return true
29754	}
29755	// match: (Zero [4] destptr mem)
29756	// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
29757	for {
29758		if auxIntToInt64(v.AuxInt) != 4 {
29759			break
29760		}
29761		destptr := v_0
29762		mem := v_1
29763		v.reset(OpAMD64MOVLstoreconst)
29764		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29765		v.AddArg2(destptr, mem)
29766		return true
29767	}
29768	// match: (Zero [8] destptr mem)
29769	// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
29770	for {
29771		if auxIntToInt64(v.AuxInt) != 8 {
29772			break
29773		}
29774		destptr := v_0
29775		mem := v_1
29776		v.reset(OpAMD64MOVQstoreconst)
29777		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29778		v.AddArg2(destptr, mem)
29779		return true
29780	}
29781	// match: (Zero [3] destptr mem)
29782	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
29783	for {
29784		if auxIntToInt64(v.AuxInt) != 3 {
29785			break
29786		}
29787		destptr := v_0
29788		mem := v_1
29789		v.reset(OpAMD64MOVBstoreconst)
29790		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
29791		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
29792		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29793		v0.AddArg2(destptr, mem)
29794		v.AddArg2(destptr, v0)
29795		return true
29796	}
29797	// match: (Zero [5] destptr mem)
29798	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
29799	for {
29800		if auxIntToInt64(v.AuxInt) != 5 {
29801			break
29802		}
29803		destptr := v_0
29804		mem := v_1
29805		v.reset(OpAMD64MOVBstoreconst)
29806		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29807		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29808		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29809		v0.AddArg2(destptr, mem)
29810		v.AddArg2(destptr, v0)
29811		return true
29812	}
29813	// match: (Zero [6] destptr mem)
29814	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
29815	for {
29816		if auxIntToInt64(v.AuxInt) != 6 {
29817			break
29818		}
29819		destptr := v_0
29820		mem := v_1
29821		v.reset(OpAMD64MOVWstoreconst)
29822		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29823		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29824		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29825		v0.AddArg2(destptr, mem)
29826		v.AddArg2(destptr, v0)
29827		return true
29828	}
29829	// match: (Zero [7] destptr mem)
29830	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
29831	for {
29832		if auxIntToInt64(v.AuxInt) != 7 {
29833			break
29834		}
29835		destptr := v_0
29836		mem := v_1
29837		v.reset(OpAMD64MOVLstoreconst)
29838		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
29839		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29840		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29841		v0.AddArg2(destptr, mem)
29842		v.AddArg2(destptr, v0)
29843		return true
29844	}
29845	// match: (Zero [s] destptr mem)
29846	// cond: s%8 != 0 && s > 8 && !config.useSSE
29847	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29848	for {
29849		s := auxIntToInt64(v.AuxInt)
29850		destptr := v_0
29851		mem := v_1
29852		if !(s%8 != 0 && s > 8 && !config.useSSE) {
29853			break
29854		}
29855		v.reset(OpZero)
29856		v.AuxInt = int64ToAuxInt(s - s%8)
29857		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
29858		v0.AuxInt = int64ToAuxInt(s % 8)
29859		v0.AddArg(destptr)
29860		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29861		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29862		v1.AddArg2(destptr, mem)
29863		v.AddArg2(v0, v1)
29864		return true
29865	}
29866	// match: (Zero [16] destptr mem)
29867	// cond: !config.useSSE
29868	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29869	for {
29870		if auxIntToInt64(v.AuxInt) != 16 {
29871			break
29872		}
29873		destptr := v_0
29874		mem := v_1
29875		if !(!config.useSSE) {
29876			break
29877		}
29878		v.reset(OpAMD64MOVQstoreconst)
29879		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29880		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29881		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29882		v0.AddArg2(destptr, mem)
29883		v.AddArg2(destptr, v0)
29884		return true
29885	}
29886	// match: (Zero [24] destptr mem)
29887	// cond: !config.useSSE
29888	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
29889	for {
29890		if auxIntToInt64(v.AuxInt) != 24 {
29891			break
29892		}
29893		destptr := v_0
29894		mem := v_1
29895		if !(!config.useSSE) {
29896			break
29897		}
29898		v.reset(OpAMD64MOVQstoreconst)
29899		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
29900		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29901		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29902		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29903		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29904		v1.AddArg2(destptr, mem)
29905		v0.AddArg2(destptr, v1)
29906		v.AddArg2(destptr, v0)
29907		return true
29908	}
29909	// match: (Zero [32] destptr mem)
29910	// cond: !config.useSSE
29911	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
29912	for {
29913		if auxIntToInt64(v.AuxInt) != 32 {
29914			break
29915		}
29916		destptr := v_0
29917		mem := v_1
29918		if !(!config.useSSE) {
29919			break
29920		}
29921		v.reset(OpAMD64MOVQstoreconst)
29922		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
29923		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29924		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
29925		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29926		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29927		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29928		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29929		v2.AddArg2(destptr, mem)
29930		v1.AddArg2(destptr, v2)
29931		v0.AddArg2(destptr, v1)
29932		v.AddArg2(destptr, v0)
29933		return true
29934	}
29935	// match: (Zero [9] destptr mem)
29936	// cond: config.useSSE
29937	// result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29938	for {
29939		if auxIntToInt64(v.AuxInt) != 9 {
29940			break
29941		}
29942		destptr := v_0
29943		mem := v_1
29944		if !(config.useSSE) {
29945			break
29946		}
29947		v.reset(OpAMD64MOVBstoreconst)
29948		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29949		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29950		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29951		v0.AddArg2(destptr, mem)
29952		v.AddArg2(destptr, v0)
29953		return true
29954	}
29955	// match: (Zero [10] destptr mem)
29956	// cond: config.useSSE
29957	// result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29958	for {
29959		if auxIntToInt64(v.AuxInt) != 10 {
29960			break
29961		}
29962		destptr := v_0
29963		mem := v_1
29964		if !(config.useSSE) {
29965			break
29966		}
29967		v.reset(OpAMD64MOVWstoreconst)
29968		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29969		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29970		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29971		v0.AddArg2(destptr, mem)
29972		v.AddArg2(destptr, v0)
29973		return true
29974	}
29975	// match: (Zero [11] destptr mem)
29976	// cond: config.useSSE
29977	// result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29978	for {
29979		if auxIntToInt64(v.AuxInt) != 11 {
29980			break
29981		}
29982		destptr := v_0
29983		mem := v_1
29984		if !(config.useSSE) {
29985			break
29986		}
29987		v.reset(OpAMD64MOVLstoreconst)
29988		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
29989		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29990		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29991		v0.AddArg2(destptr, mem)
29992		v.AddArg2(destptr, v0)
29993		return true
29994	}
29995	// match: (Zero [12] destptr mem)
29996	// cond: config.useSSE
29997	// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
29998	for {
29999		if auxIntToInt64(v.AuxInt) != 12 {
30000			break
30001		}
30002		destptr := v_0
30003		mem := v_1
30004		if !(config.useSSE) {
30005			break
30006		}
30007		v.reset(OpAMD64MOVLstoreconst)
30008		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30009		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30010		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30011		v0.AddArg2(destptr, mem)
30012		v.AddArg2(destptr, v0)
30013		return true
30014	}
30015	// match: (Zero [s] destptr mem)
30016	// cond: s > 12 && s < 16 && config.useSSE
30017	// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
30018	for {
30019		s := auxIntToInt64(v.AuxInt)
30020		destptr := v_0
30021		mem := v_1
30022		if !(s > 12 && s < 16 && config.useSSE) {
30023			break
30024		}
30025		v.reset(OpAMD64MOVQstoreconst)
30026		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
30027		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30028		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30029		v0.AddArg2(destptr, mem)
30030		v.AddArg2(destptr, v0)
30031		return true
30032	}
30033	// match: (Zero [s] destptr mem)
30034	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
30035	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
30036	for {
30037		s := auxIntToInt64(v.AuxInt)
30038		destptr := v_0
30039		mem := v_1
30040		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
30041			break
30042		}
30043		v.reset(OpZero)
30044		v.AuxInt = int64ToAuxInt(s - s%16)
30045		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30046		v0.AuxInt = int64ToAuxInt(s % 16)
30047		v0.AddArg(destptr)
30048		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30049		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30050		v1.AddArg2(destptr, mem)
30051		v.AddArg2(v0, v1)
30052		return true
30053	}
30054	// match: (Zero [s] destptr mem)
30055	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
30056	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
30057	for {
30058		s := auxIntToInt64(v.AuxInt)
30059		destptr := v_0
30060		mem := v_1
30061		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
30062			break
30063		}
30064		v.reset(OpZero)
30065		v.AuxInt = int64ToAuxInt(s - s%16)
30066		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30067		v0.AuxInt = int64ToAuxInt(s % 16)
30068		v0.AddArg(destptr)
30069		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30070		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30071		v1.AddArg2(destptr, mem)
30072		v.AddArg2(v0, v1)
30073		return true
30074	}
30075	// match: (Zero [16] destptr mem)
30076	// cond: config.useSSE
30077	// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
30078	for {
30079		if auxIntToInt64(v.AuxInt) != 16 {
30080			break
30081		}
30082		destptr := v_0
30083		mem := v_1
30084		if !(config.useSSE) {
30085			break
30086		}
30087		v.reset(OpAMD64MOVOstoreconst)
30088		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30089		v.AddArg2(destptr, mem)
30090		return true
30091	}
30092	// match: (Zero [32] destptr mem)
30093	// cond: config.useSSE
30094	// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
30095	for {
30096		if auxIntToInt64(v.AuxInt) != 32 {
30097			break
30098		}
30099		destptr := v_0
30100		mem := v_1
30101		if !(config.useSSE) {
30102			break
30103		}
30104		v.reset(OpAMD64MOVOstoreconst)
30105		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30106		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30107		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30108		v0.AddArg2(destptr, mem)
30109		v.AddArg2(destptr, v0)
30110		return true
30111	}
30112	// match: (Zero [48] destptr mem)
30113	// cond: config.useSSE
30114	// result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
30115	for {
30116		if auxIntToInt64(v.AuxInt) != 48 {
30117			break
30118		}
30119		destptr := v_0
30120		mem := v_1
30121		if !(config.useSSE) {
30122			break
30123		}
30124		v.reset(OpAMD64MOVOstoreconst)
30125		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30126		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30127		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30128		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30129		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30130		v1.AddArg2(destptr, mem)
30131		v0.AddArg2(destptr, v1)
30132		v.AddArg2(destptr, v0)
30133		return true
30134	}
30135	// match: (Zero [64] destptr mem)
30136	// cond: config.useSSE
30137	// result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
30138	for {
30139		if auxIntToInt64(v.AuxInt) != 64 {
30140			break
30141		}
30142		destptr := v_0
30143		mem := v_1
30144		if !(config.useSSE) {
30145			break
30146		}
30147		v.reset(OpAMD64MOVOstoreconst)
30148		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
30149		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30150		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30151		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30152		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30153		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30154		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30155		v2.AddArg2(destptr, mem)
30156		v1.AddArg2(destptr, v2)
30157		v0.AddArg2(destptr, v1)
30158		v.AddArg2(destptr, v0)
30159		return true
30160	}
30161	// match: (Zero [s] destptr mem)
30162	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
30163	// result: (DUFFZERO [s] destptr mem)
30164	for {
30165		s := auxIntToInt64(v.AuxInt)
30166		destptr := v_0
30167		mem := v_1
30168		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
30169			break
30170		}
30171		v.reset(OpAMD64DUFFZERO)
30172		v.AuxInt = int64ToAuxInt(s)
30173		v.AddArg2(destptr, mem)
30174		return true
30175	}
30176	// match: (Zero [s] destptr mem)
30177	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
30178	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
30179	for {
30180		s := auxIntToInt64(v.AuxInt)
30181		destptr := v_0
30182		mem := v_1
30183		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
30184			break
30185		}
30186		v.reset(OpAMD64REPSTOSQ)
30187		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30188		v0.AuxInt = int64ToAuxInt(s / 8)
30189		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30190		v1.AuxInt = int64ToAuxInt(0)
30191		v.AddArg4(destptr, v0, v1, mem)
30192		return true
30193	}
30194	return false
30195}
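// rewriteBlockAMD64 optimizes control flow rather than values: it matches on
// a block's kind and control value and rewrites both in place. The recurring
// themes below are re-expressing single-bit TEST controls as BT
// instructions, folding InvertFlags into the mirrored condition, and
// resolving branches whose flags are statically known (the Flag* ops) into
// unconditional First blocks, swapping successors when the branch is
// statically not taken.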
30196func rewriteBlockAMD64(b *Block) bool {
30197	typ := &b.Func.Config.Types
30198	switch b.Kind {
30199	case BlockAMD64EQ:
30200		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
30201		// result: (UGE (BTL x y))
30202		for b.Controls[0].Op == OpAMD64TESTL {
30203			v_0 := b.Controls[0]
30204			_ = v_0.Args[1]
30205			v_0_0 := v_0.Args[0]
30206			v_0_1 := v_0.Args[1]
30207			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30208				if v_0_0.Op != OpAMD64SHLL {
30209					continue
30210				}
30211				x := v_0_0.Args[1]
30212				v_0_0_0 := v_0_0.Args[0]
30213				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
30214					continue
30215				}
30216				y := v_0_1
30217				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
30218				v0.AddArg2(x, y)
30219				b.resetWithControl(BlockAMD64UGE, v0)
30220				return true
30221			}
30222			break
30223		}
30224		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
30225		// result: (UGE (BTQ x y))
30226		for b.Controls[0].Op == OpAMD64TESTQ {
30227			v_0 := b.Controls[0]
30228			_ = v_0.Args[1]
30229			v_0_0 := v_0.Args[0]
30230			v_0_1 := v_0.Args[1]
30231			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30232				if v_0_0.Op != OpAMD64SHLQ {
30233					continue
30234				}
30235				x := v_0_0.Args[1]
30236				v_0_0_0 := v_0_0.Args[0]
30237				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
30238					continue
30239				}
30240				y := v_0_1
30241				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
30242				v0.AddArg2(x, y)
30243				b.resetWithControl(BlockAMD64UGE, v0)
30244				return true
30245			}
30246			break
30247		}
30248		// match: (EQ (TESTLconst [c] x))
30249		// cond: isUint32PowerOfTwo(int64(c))
30250		// result: (UGE (BTLconst [int8(log32(c))] x))
30251		for b.Controls[0].Op == OpAMD64TESTLconst {
30252			v_0 := b.Controls[0]
30253			c := auxIntToInt32(v_0.AuxInt)
30254			x := v_0.Args[0]
30255			if !(isUint32PowerOfTwo(int64(c))) {
30256				break
30257			}
30258			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30259			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30260			v0.AddArg(x)
30261			b.resetWithControl(BlockAMD64UGE, v0)
30262			return true
30263		}
30264		// match: (EQ (TESTQconst [c] x))
30265		// cond: isUint64PowerOfTwo(int64(c))
30266		// result: (UGE (BTQconst [int8(log32(c))] x))
30267		for b.Controls[0].Op == OpAMD64TESTQconst {
30268			v_0 := b.Controls[0]
30269			c := auxIntToInt32(v_0.AuxInt)
30270			x := v_0.Args[0]
30271			if !(isUint64PowerOfTwo(int64(c))) {
30272				break
30273			}
30274			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30275			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30276			v0.AddArg(x)
30277			b.resetWithControl(BlockAMD64UGE, v0)
30278			return true
30279		}
30280		// match: (EQ (TESTQ (MOVQconst [c]) x))
30281		// cond: isUint64PowerOfTwo(c)
30282		// result: (UGE (BTQconst [int8(log64(c))] x))
30283		for b.Controls[0].Op == OpAMD64TESTQ {
30284			v_0 := b.Controls[0]
30285			_ = v_0.Args[1]
30286			v_0_0 := v_0.Args[0]
30287			v_0_1 := v_0.Args[1]
30288			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30289				if v_0_0.Op != OpAMD64MOVQconst {
30290					continue
30291				}
30292				c := auxIntToInt64(v_0_0.AuxInt)
30293				x := v_0_1
30294				if !(isUint64PowerOfTwo(c)) {
30295					continue
30296				}
30297				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30298				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
30299				v0.AddArg(x)
30300				b.resetWithControl(BlockAMD64UGE, v0)
30301				return true
30302			}
30303			break
30304		}
30305		// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
30306		// cond: z1==z2
30307		// result: (UGE (BTQconst [63] x))
30308		for b.Controls[0].Op == OpAMD64TESTQ {
30309			v_0 := b.Controls[0]
30310			_ = v_0.Args[1]
30311			v_0_0 := v_0.Args[0]
30312			v_0_1 := v_0.Args[1]
30313			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30314				z1 := v_0_0
30315				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
30316					continue
30317				}
30318				z1_0 := z1.Args[0]
30319				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30320					continue
30321				}
30322				x := z1_0.Args[0]
30323				z2 := v_0_1
30324				if !(z1 == z2) {
30325					continue
30326				}
30327				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30328				v0.AuxInt = int8ToAuxInt(63)
30329				v0.AddArg(x)
30330				b.resetWithControl(BlockAMD64UGE, v0)
30331				return true
30332			}
30333			break
30334		}
30335		// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
30336		// cond: z1==z2
30337		// result: (UGE (BTQconst [31] x))
30338		for b.Controls[0].Op == OpAMD64TESTL {
30339			v_0 := b.Controls[0]
30340			_ = v_0.Args[1]
30341			v_0_0 := v_0.Args[0]
30342			v_0_1 := v_0.Args[1]
30343			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30344				z1 := v_0_0
30345				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
30346					continue
30347				}
30348				z1_0 := z1.Args[0]
30349				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30350					continue
30351				}
30352				x := z1_0.Args[0]
30353				z2 := v_0_1
30354				if !(z1 == z2) {
30355					continue
30356				}
30357				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30358				v0.AuxInt = int8ToAuxInt(31)
30359				v0.AddArg(x)
30360				b.resetWithControl(BlockAMD64UGE, v0)
30361				return true
30362			}
30363			break
30364		}
30365		// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
30366		// cond: z1==z2
30367		// result: (UGE (BTQconst [0] x))
30368		for b.Controls[0].Op == OpAMD64TESTQ {
30369			v_0 := b.Controls[0]
30370			_ = v_0.Args[1]
30371			v_0_0 := v_0.Args[0]
30372			v_0_1 := v_0.Args[1]
30373			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30374				z1 := v_0_0
30375				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30376					continue
30377				}
30378				z1_0 := z1.Args[0]
30379				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30380					continue
30381				}
30382				x := z1_0.Args[0]
30383				z2 := v_0_1
30384				if !(z1 == z2) {
30385					continue
30386				}
30387				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30388				v0.AuxInt = int8ToAuxInt(0)
30389				v0.AddArg(x)
30390				b.resetWithControl(BlockAMD64UGE, v0)
30391				return true
30392			}
30393			break
30394		}
30395		// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
30396		// cond: z1==z2
30397		// result: (UGE (BTLconst [0] x))
30398		for b.Controls[0].Op == OpAMD64TESTL {
30399			v_0 := b.Controls[0]
30400			_ = v_0.Args[1]
30401			v_0_0 := v_0.Args[0]
30402			v_0_1 := v_0.Args[1]
30403			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30404				z1 := v_0_0
30405				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30406					continue
30407				}
30408				z1_0 := z1.Args[0]
30409				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30410					continue
30411				}
30412				x := z1_0.Args[0]
30413				z2 := v_0_1
30414				if !(z1 == z2) {
30415					continue
30416				}
30417				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30418				v0.AuxInt = int8ToAuxInt(0)
30419				v0.AddArg(x)
30420				b.resetWithControl(BlockAMD64UGE, v0)
30421				return true
30422			}
30423			break
30424		}
30425		// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
30426		// cond: z1==z2
30427		// result: (UGE (BTQconst [63] x))
30428		for b.Controls[0].Op == OpAMD64TESTQ {
30429			v_0 := b.Controls[0]
30430			_ = v_0.Args[1]
30431			v_0_0 := v_0.Args[0]
30432			v_0_1 := v_0.Args[1]
30433			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30434				z1 := v_0_0
30435				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30436					continue
30437				}
30438				x := z1.Args[0]
30439				z2 := v_0_1
30440				if !(z1 == z2) {
30441					continue
30442				}
30443				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30444				v0.AuxInt = int8ToAuxInt(63)
30445				v0.AddArg(x)
30446				b.resetWithControl(BlockAMD64UGE, v0)
30447				return true
30448			}
30449			break
30450		}
30451		// match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
30452		// cond: z1==z2
30453		// result: (UGE (BTLconst [31] x))
30454		for b.Controls[0].Op == OpAMD64TESTL {
30455			v_0 := b.Controls[0]
30456			_ = v_0.Args[1]
30457			v_0_0 := v_0.Args[0]
30458			v_0_1 := v_0.Args[1]
30459			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30460				z1 := v_0_0
30461				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30462					continue
30463				}
30464				x := z1.Args[0]
30465				z2 := v_0_1
30466				if !(z1 == z2) {
30467					continue
30468				}
30469				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30470				v0.AuxInt = int8ToAuxInt(31)
30471				v0.AddArg(x)
30472				b.resetWithControl(BlockAMD64UGE, v0)
30473				return true
30474			}
30475			break
30476		}
30477		// match: (EQ (InvertFlags cmp) yes no)
30478		// result: (EQ cmp yes no)
30479		for b.Controls[0].Op == OpAMD64InvertFlags {
30480			v_0 := b.Controls[0]
30481			cmp := v_0.Args[0]
30482			b.resetWithControl(BlockAMD64EQ, cmp)
30483			return true
30484		}
30485		// match: (EQ (FlagEQ) yes no)
30486		// result: (First yes no)
30487		for b.Controls[0].Op == OpAMD64FlagEQ {
30488			b.Reset(BlockFirst)
30489			return true
30490		}
30491		// match: (EQ (FlagLT_ULT) yes no)
30492		// result: (First no yes)
30493		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30494			b.Reset(BlockFirst)
30495			b.swapSuccessors()
30496			return true
30497		}
30498		// match: (EQ (FlagLT_UGT) yes no)
30499		// result: (First no yes)
30500		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30501			b.Reset(BlockFirst)
30502			b.swapSuccessors()
30503			return true
30504		}
30505		// match: (EQ (FlagGT_ULT) yes no)
30506		// result: (First no yes)
30507		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30508			b.Reset(BlockFirst)
30509			b.swapSuccessors()
30510			return true
30511		}
30512		// match: (EQ (FlagGT_UGT) yes no)
30513		// result: (First no yes)
30514		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30515			b.Reset(BlockFirst)
30516			b.swapSuccessors()
30517			return true
30518		}
30519		// match: (EQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
30520		// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
30521		for b.Controls[0].Op == OpAMD64TESTQ {
30522			v_0 := b.Controls[0]
30523			_ = v_0.Args[1]
30524			v_0_0 := v_0.Args[0]
30525			v_0_1 := v_0.Args[1]
30526			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30527				s := v_0_0
30528				if s.Op != OpSelect0 {
30529					continue
30530				}
30531				blsr := s.Args[0]
30532				if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
30533					continue
30534				}
30535				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30536				v0.AddArg(blsr)
30537				b.resetWithControl(BlockAMD64EQ, v0)
30538				return true
30539			}
30540			break
30541		}
30542		// match: (EQ (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
30543		// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
30544		for b.Controls[0].Op == OpAMD64TESTL {
30545			v_0 := b.Controls[0]
30546			_ = v_0.Args[1]
30547			v_0_0 := v_0.Args[0]
30548			v_0_1 := v_0.Args[1]
30549			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30550				s := v_0_0
30551				if s.Op != OpSelect0 {
30552					continue
30553				}
30554				blsr := s.Args[0]
30555				if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
30556					continue
30557				}
30558				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30559				v0.AddArg(blsr)
30560				b.resetWithControl(BlockAMD64EQ, v0)
30561				return true
30562			}
30563			break
30564		}
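	// The GE/GT/LE/LT cases carry only the generic cleanups: InvertFlags
	// mirrors the comparison (GE <-> LE, GT <-> LT), and statically known
	// flags collapse the block into First, with successors swapped when the
	// recorded result contradicts the condition.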
30565	case BlockAMD64GE:
30566		// match: (GE (InvertFlags cmp) yes no)
30567		// result: (LE cmp yes no)
30568		for b.Controls[0].Op == OpAMD64InvertFlags {
30569			v_0 := b.Controls[0]
30570			cmp := v_0.Args[0]
30571			b.resetWithControl(BlockAMD64LE, cmp)
30572			return true
30573		}
30574		// match: (GE (FlagEQ) yes no)
30575		// result: (First yes no)
30576		for b.Controls[0].Op == OpAMD64FlagEQ {
30577			b.Reset(BlockFirst)
30578			return true
30579		}
30580		// match: (GE (FlagLT_ULT) yes no)
30581		// result: (First no yes)
30582		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30583			b.Reset(BlockFirst)
30584			b.swapSuccessors()
30585			return true
30586		}
30587		// match: (GE (FlagLT_UGT) yes no)
30588		// result: (First no yes)
30589		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30590			b.Reset(BlockFirst)
30591			b.swapSuccessors()
30592			return true
30593		}
30594		// match: (GE (FlagGT_ULT) yes no)
30595		// result: (First yes no)
30596		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30597			b.Reset(BlockFirst)
30598			return true
30599		}
30600		// match: (GE (FlagGT_UGT) yes no)
30601		// result: (First yes no)
30602		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30603			b.Reset(BlockFirst)
30604			return true
30605		}
30606	case BlockAMD64GT:
30607		// match: (GT (InvertFlags cmp) yes no)
30608		// result: (LT cmp yes no)
30609		for b.Controls[0].Op == OpAMD64InvertFlags {
30610			v_0 := b.Controls[0]
30611			cmp := v_0.Args[0]
30612			b.resetWithControl(BlockAMD64LT, cmp)
30613			return true
30614		}
30615		// match: (GT (FlagEQ) yes no)
30616		// result: (First no yes)
30617		for b.Controls[0].Op == OpAMD64FlagEQ {
30618			b.Reset(BlockFirst)
30619			b.swapSuccessors()
30620			return true
30621		}
30622		// match: (GT (FlagLT_ULT) yes no)
30623		// result: (First no yes)
30624		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30625			b.Reset(BlockFirst)
30626			b.swapSuccessors()
30627			return true
30628		}
30629		// match: (GT (FlagLT_UGT) yes no)
30630		// result: (First no yes)
30631		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30632			b.Reset(BlockFirst)
30633			b.swapSuccessors()
30634			return true
30635		}
30636		// match: (GT (FlagGT_ULT) yes no)
30637		// result: (First yes no)
30638		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30639			b.Reset(BlockFirst)
30640			return true
30641		}
30642		// match: (GT (FlagGT_UGT) yes no)
30643		// result: (First yes no)
30644		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30645			b.Reset(BlockFirst)
30646			return true
30647		}
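	// If blocks branch on a boolean. When the boolean is a SET* of some
	// flags, the SET* is peeled off and the block becomes the matching
	// flags-based kind (SETL -> LT, SETB -> ULT, SETGF -> UGT since float
	// comparisons use the unsigned condition codes, and so on). The final
	// catch-all materializes any other boolean into flags with
	// (TESTB cond cond) so the block can become NE.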
30648	case BlockIf:
30649		// match: (If (SETL cmp) yes no)
30650		// result: (LT cmp yes no)
30651		for b.Controls[0].Op == OpAMD64SETL {
30652			v_0 := b.Controls[0]
30653			cmp := v_0.Args[0]
30654			b.resetWithControl(BlockAMD64LT, cmp)
30655			return true
30656		}
30657		// match: (If (SETLE cmp) yes no)
30658		// result: (LE cmp yes no)
30659		for b.Controls[0].Op == OpAMD64SETLE {
30660			v_0 := b.Controls[0]
30661			cmp := v_0.Args[0]
30662			b.resetWithControl(BlockAMD64LE, cmp)
30663			return true
30664		}
30665		// match: (If (SETG cmp) yes no)
30666		// result: (GT cmp yes no)
30667		for b.Controls[0].Op == OpAMD64SETG {
30668			v_0 := b.Controls[0]
30669			cmp := v_0.Args[0]
30670			b.resetWithControl(BlockAMD64GT, cmp)
30671			return true
30672		}
30673		// match: (If (SETGE cmp) yes no)
30674		// result: (GE cmp yes no)
30675		for b.Controls[0].Op == OpAMD64SETGE {
30676			v_0 := b.Controls[0]
30677			cmp := v_0.Args[0]
30678			b.resetWithControl(BlockAMD64GE, cmp)
30679			return true
30680		}
30681		// match: (If (SETEQ cmp) yes no)
30682		// result: (EQ cmp yes no)
30683		for b.Controls[0].Op == OpAMD64SETEQ {
30684			v_0 := b.Controls[0]
30685			cmp := v_0.Args[0]
30686			b.resetWithControl(BlockAMD64EQ, cmp)
30687			return true
30688		}
30689		// match: (If (SETNE cmp) yes no)
30690		// result: (NE cmp yes no)
30691		for b.Controls[0].Op == OpAMD64SETNE {
30692			v_0 := b.Controls[0]
30693			cmp := v_0.Args[0]
30694			b.resetWithControl(BlockAMD64NE, cmp)
30695			return true
30696		}
30697		// match: (If (SETB cmp) yes no)
30698		// result: (ULT cmp yes no)
30699		for b.Controls[0].Op == OpAMD64SETB {
30700			v_0 := b.Controls[0]
30701			cmp := v_0.Args[0]
30702			b.resetWithControl(BlockAMD64ULT, cmp)
30703			return true
30704		}
30705		// match: (If (SETBE cmp) yes no)
30706		// result: (ULE cmp yes no)
30707		for b.Controls[0].Op == OpAMD64SETBE {
30708			v_0 := b.Controls[0]
30709			cmp := v_0.Args[0]
30710			b.resetWithControl(BlockAMD64ULE, cmp)
30711			return true
30712		}
30713		// match: (If (SETA cmp) yes no)
30714		// result: (UGT cmp yes no)
30715		for b.Controls[0].Op == OpAMD64SETA {
30716			v_0 := b.Controls[0]
30717			cmp := v_0.Args[0]
30718			b.resetWithControl(BlockAMD64UGT, cmp)
30719			return true
30720		}
30721		// match: (If (SETAE cmp) yes no)
30722		// result: (UGE cmp yes no)
30723		for b.Controls[0].Op == OpAMD64SETAE {
30724			v_0 := b.Controls[0]
30725			cmp := v_0.Args[0]
30726			b.resetWithControl(BlockAMD64UGE, cmp)
30727			return true
30728		}
30729		// match: (If (SETO cmp) yes no)
30730		// result: (OS cmp yes no)
30731		for b.Controls[0].Op == OpAMD64SETO {
30732			v_0 := b.Controls[0]
30733			cmp := v_0.Args[0]
30734			b.resetWithControl(BlockAMD64OS, cmp)
30735			return true
30736		}
30737		// match: (If (SETGF cmp) yes no)
30738		// result: (UGT cmp yes no)
30739		for b.Controls[0].Op == OpAMD64SETGF {
30740			v_0 := b.Controls[0]
30741			cmp := v_0.Args[0]
30742			b.resetWithControl(BlockAMD64UGT, cmp)
30743			return true
30744		}
30745		// match: (If (SETGEF cmp) yes no)
30746		// result: (UGE cmp yes no)
30747		for b.Controls[0].Op == OpAMD64SETGEF {
30748			v_0 := b.Controls[0]
30749			cmp := v_0.Args[0]
30750			b.resetWithControl(BlockAMD64UGE, cmp)
30751			return true
30752		}
30753		// match: (If (SETEQF cmp) yes no)
30754		// result: (EQF cmp yes no)
30755		for b.Controls[0].Op == OpAMD64SETEQF {
30756			v_0 := b.Controls[0]
30757			cmp := v_0.Args[0]
30758			b.resetWithControl(BlockAMD64EQF, cmp)
30759			return true
30760		}
30761		// match: (If (SETNEF cmp) yes no)
30762		// result: (NEF cmp yes no)
30763		for b.Controls[0].Op == OpAMD64SETNEF {
30764			v_0 := b.Controls[0]
30765			cmp := v_0.Args[0]
30766			b.resetWithControl(BlockAMD64NEF, cmp)
30767			return true
30768		}
30769		// match: (If cond yes no)
30770		// result: (NE (TESTB cond cond) yes no)
30771		for {
30772			cond := b.Controls[0]
30773			v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
30774			v0.AddArg2(cond, cond)
30775			b.resetWithControl(BlockAMD64NE, v0)
30776			return true
30777		}
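	// JumpTable lowering: the index-controlled generic block becomes a
	// JUMPTABLE block whose second control is the table address, formed by a
	// LEAQ of this block's jump-table symbol relative to SB.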
30778	case BlockJumpTable:
30779		// match: (JumpTable idx)
30780		// result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
30781		for {
30782			idx := b.Controls[0]
30783			v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
30784			v0.Aux = symToAux(makeJumpTableSym(b))
30785			v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
30786			v0.AddArg(v1)
30787			b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
30788			b.Aux = symToAux(makeJumpTableSym(b))
30789			return true
30790		}
30791	case BlockAMD64LE:
30792		// match: (LE (InvertFlags cmp) yes no)
30793		// result: (GE cmp yes no)
30794		for b.Controls[0].Op == OpAMD64InvertFlags {
30795			v_0 := b.Controls[0]
30796			cmp := v_0.Args[0]
30797			b.resetWithControl(BlockAMD64GE, cmp)
30798			return true
30799		}
30800		// match: (LE (FlagEQ) yes no)
30801		// result: (First yes no)
30802		for b.Controls[0].Op == OpAMD64FlagEQ {
30803			b.Reset(BlockFirst)
30804			return true
30805		}
30806		// match: (LE (FlagLT_ULT) yes no)
30807		// result: (First yes no)
30808		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30809			b.Reset(BlockFirst)
30810			return true
30811		}
30812		// match: (LE (FlagLT_UGT) yes no)
30813		// result: (First yes no)
30814		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30815			b.Reset(BlockFirst)
30816			return true
30817		}
30818		// match: (LE (FlagGT_ULT) yes no)
30819		// result: (First no yes)
30820		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30821			b.Reset(BlockFirst)
30822			b.swapSuccessors()
30823			return true
30824		}
30825		// match: (LE (FlagGT_UGT) yes no)
30826		// result: (First no yes)
30827		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30828			b.Reset(BlockFirst)
30829			b.swapSuccessors()
30830			return true
30831		}
30832	case BlockAMD64LT:
30833		// match: (LT (InvertFlags cmp) yes no)
30834		// result: (GT cmp yes no)
30835		for b.Controls[0].Op == OpAMD64InvertFlags {
30836			v_0 := b.Controls[0]
30837			cmp := v_0.Args[0]
30838			b.resetWithControl(BlockAMD64GT, cmp)
30839			return true
30840		}
30841		// match: (LT (FlagEQ) yes no)
30842		// result: (First no yes)
30843		for b.Controls[0].Op == OpAMD64FlagEQ {
30844			b.Reset(BlockFirst)
30845			b.swapSuccessors()
30846			return true
30847		}
30848		// match: (LT (FlagLT_ULT) yes no)
30849		// result: (First yes no)
30850		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30851			b.Reset(BlockFirst)
30852			return true
30853		}
30854		// match: (LT (FlagLT_UGT) yes no)
30855		// result: (First yes no)
30856		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30857			b.Reset(BlockFirst)
30858			return true
30859		}
30860		// match: (LT (FlagGT_ULT) yes no)
30861		// result: (First no yes)
30862		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30863			b.Reset(BlockFirst)
30864			b.swapSuccessors()
30865			return true
30866		}
30867		// match: (LT (FlagGT_UGT) yes no)
30868		// result: (First no yes)
30869		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30870			b.Reset(BlockFirst)
30871			b.swapSuccessors()
30872			return true
30873		}
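	// NE is the busiest case because every boolean branch is funneled
	// through (NE (TESTB cond cond)) by the If lowering above. The first
	// group of rules peels that wrapper back off: if cond is itself a SET*
	// of a comparison, branching on TESTB of it is equivalent to branching
	// on the comparison directly, so the block becomes the corresponding
	// LT/LE/GT/GE/EQ/NE/ULT/ULE/UGT/UGE/OS on the original flags.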
30874	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64OS, cmp)
			return true
		}
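		// TEST is commutative, so the matchers below try both argument
		// orders via the _i0 loop, which swaps v_0_0 and v_0_1 on its second
		// iteration. (SHLL (MOVLconst [1]) x) is 1<<x, and testing y against
		// it checks bit x of y. BT copies the selected bit into CF, so the
		// NE branch becomes ULT, which on amd64 branches when CF is set.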
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (ULT (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLL {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (ULT (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLQ {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
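		// A TEST against a constant power-of-two mask 1<<k examines a single
		// bit, so it is rewritten to a bit-test instruction with the bit
		// index recovered by log32/log64. For example, TESTL $8, x becomes
		// BTLconst [3] x, and the NE branch again becomes ULT (CF set).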
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(int64(c))
		// result: (ULT (BTLconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64ULT, v0)
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(int64(c))
		// result: (ULT (BTQconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64ULT, v0)
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [int8(log64(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64MOVQconst {
					continue
				}
				c := auxIntToInt64(v_0_0.AuxInt)
				x := v_0_1
				if !(isUint64PowerOfTwo(c)) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
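		// The next rules match shift idioms that isolate one bit.
		// (SHLQconst [63] (SHRQconst [63] x)) computes (x>>63)<<63, which is
		// nonzero exactly when the sign bit of x is set, so the whole TEST
		// reduces to BTQconst [63] x. The mirrored (x<<63)>>63 idiom tests
		// bit 0 instead. Note that the 32-bit variant whose inner shift is a
		// 64-bit SHRQconst produces BTQconst [31], since x there is a 64-bit
		// value.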
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
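		// Floating-point compares on amd64 (UCOMISS/UCOMISD) set the flags
		// the way an unsigned integer compare would, so the "greater" float
		// conditions map to the unsigned blocks UGT and UGE. Equality keeps
		// the dedicated EQF/NEF blocks because an unordered result (NaN)
		// also sets ZF and must be distinguished via PF.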
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64EQF, cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64NEF, cmp)
			return true
		}
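		// InvertFlags stands for the same comparison with its operands
		// swapped. Inequality is symmetric, so the NE block can use the
		// underlying comparison directly; asymmetric conditions (see the
		// UGE/UGT/ULE/ULT cases below) flip to their mirrored block instead.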
		// match: (NE (InvertFlags cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
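		// BLSRQ/BLSRL (BMI1, "reset lowest set bit") already set ZF
		// according to their result, so when that result is only compared
		// against itself with TEST, the TEST is redundant: the rewrite takes
		// the flags half of the BLSR via Select1 and branches on it,
		// allowing the TEST to become dead.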
		// match: (NE (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
		// result: (NE (Select1 <types.TypeFlags> blsr) yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				s := v_0_0
				if s.Op != OpSelect0 {
					continue
				}
				blsr := s.Args[0]
				if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
				v0.AddArg(blsr)
				b.resetWithControl(BlockAMD64NE, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
		// result: (NE (Select1 <types.TypeFlags> blsr) yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				s := v_0_0
				if s.Op != OpSelect0 {
					continue
				}
				blsr := s.Args[0]
				if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
				v0.AddArg(blsr)
				b.resetWithControl(BlockAMD64NE, v0)
				return true
			}
			break
		}
	case BlockAMD64UGE:
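		// TEST always clears CF, and UGE branches when CF is clear, so a
		// UGE block controlled by a self-TEST is unconditionally taken and
		// collapses to BlockFirst with the yes successor first.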
		// match: (UGE (TESTQ x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTL x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTW x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTB x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (InvertFlags cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
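		// Dual of the UGE case above: ULT branches when CF is set, and TEST
		// always clears CF, so a self-TEST control makes the branch
		// unconditionally not taken and the successors are swapped.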
		// match: (ULT (TESTQ x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTL x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTW x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTB x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (InvertFlags cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	}
	return false
}