Lines Matching defs:vq (each match is the source line number followed by the matched code):
14 __vector_quad vq = *((__vector_quad *)vqp);
49 __vector_quad vq = *((__vector_quad *)vqp);
79 __vector_quad vq = *((__vector_quad *)vqp);
81 __builtin_mma_xxmtacc(&vq);
82 *((__vector_quad *)resp) = vq;
93 __vector_quad vq = *((__vector_quad *)vqp);
95 __builtin_mma_xxmfacc(&vq);
96 *((__vector_quad *)resp) = vq;
106 __vector_quad vq = *((__vector_quad *)vqp);
108 __builtin_mma_xxsetaccz(&vq);
109 *((__vector_quad *)resp) = vq;
119 __vector_quad vq = *((__vector_quad *)vqp);
121 __builtin_mma_xvi4ger8(&vq, vc, vc);
122 *((__vector_quad *)resp) = vq;
132 __vector_quad vq = *((__vector_quad *)vqp);
134 __builtin_mma_xvi8ger4(&vq, vc, vc);
135 *((__vector_quad *)resp) = vq;
145 __vector_quad vq = *((__vector_quad *)vqp);
147 __builtin_mma_xvi16ger2(&vq, vc, vc);
148 *((__vector_quad *)resp) = vq;
158 __vector_quad vq = *((__vector_quad *)vqp);
160 __builtin_mma_xvi16ger2s(&vq, vc, vc);
161 *((__vector_quad *)resp) = vq;
171 __vector_quad vq = *((__vector_quad *)vqp);
173 __builtin_mma_xvf16ger2(&vq, vc, vc);
174 *((__vector_quad *)resp) = vq;
184 __vector_quad vq = *((__vector_quad *)vqp);
186 __builtin_mma_xvf32ger(&vq, vc, vc);
187 *((__vector_quad *)resp) = vq;
198 __vector_quad vq = *((__vector_quad *)vqp);
200 __builtin_mma_xvf64ger(&vq, vp, vc);
201 *((__vector_quad *)resp) = vq;
211 __vector_quad vq = *((__vector_quad *)vqp);
213 __builtin_mma_pmxvi4ger8(&vq, vc, vc, 0, 0, 0);
214 *((__vector_quad *)resp) = vq;
224 __vector_quad vq = *((__vector_quad *)vqp);
226 __builtin_mma_pmxvi8ger4(&vq, vc, vc, 0, 0, 0);
227 *((__vector_quad *)resp) = vq;
237 __vector_quad vq = *((__vector_quad *)vqp);
239 __builtin_mma_pmxvi16ger2(&vq, vc, vc, 0, 0, 0);
240 *((__vector_quad *)resp) = vq;
250 __vector_quad vq = *((__vector_quad *)vqp);
252 __builtin_mma_pmxvi16ger2s(&vq, vc, vc, 0, 0, 0);
253 *((__vector_quad *)resp) = vq;
263 __vector_quad vq = *((__vector_quad *)vqp);
265 __builtin_mma_pmxvf16ger2(&vq, vc, vc, 0, 0, 0);
266 *((__vector_quad *)resp) = vq;
276 __vector_quad vq = *((__vector_quad *)vqp);
278 __builtin_mma_pmxvf32ger(&vq, vc, vc, 0, 0);
279 *((__vector_quad *)resp) = vq;
290 __vector_quad vq = *((__vector_quad *)vqp);
292 __builtin_mma_pmxvf64ger(&vq, vp, vc, 0, 0);
293 *((__vector_quad *)resp) = vq;
304 __vector_quad vq = *((__vector_quad *)vqp);
306 __builtin_mma_xvi4ger8pp(&vq, vc, vc);
307 *((__vector_quad *)resp) = vq;
318 __vector_quad vq = *((__vector_quad *)vqp);
320 __builtin_mma_xvi8ger4pp(&vq, vc, vc);
321 *((__vector_quad *)resp) = vq;
332 __vector_quad vq = *((__vector_quad *)vqp);
334 __builtin_mma_xvi8ger4spp(&vq, vc, vc);
335 *((__vector_quad *)resp) = vq;
346 __vector_quad vq = *((__vector_quad *)vqp);
348 __builtin_mma_xvi16ger2pp(&vq, vc, vc);
349 *((__vector_quad *)resp) = vq;
360 __vector_quad vq = *((__vector_quad *)vqp);
362 __builtin_mma_xvi16ger2spp(&vq, vc, vc);
363 *((__vector_quad *)resp) = vq;
374 __vector_quad vq = *((__vector_quad *)vqp);
376 __builtin_mma_pmxvi4ger8pp(&vq, vc, vc, 0, 0, 0);
377 *((__vector_quad *)resp) = vq;
388 __vector_quad vq = *((__vector_quad *)vqp);
390 __builtin_mma_pmxvi8ger4pp(&vq, vc, vc, 0, 0, 0);
391 *((__vector_quad *)resp) = vq;
402 __vector_quad vq = *((__vector_quad *)vqp);
404 __builtin_mma_pmxvi8ger4spp(&vq, vc, vc, 0, 0, 0);
405 *((__vector_quad *)resp) = vq;
416 __vector_quad vq = *((__vector_quad *)vqp);
418 __builtin_mma_pmxvi16ger2pp(&vq, vc, vc, 0, 0, 0);
419 *((__vector_quad *)resp) = vq;
430 __vector_quad vq = *((__vector_quad *)vqp);
432 __builtin_mma_pmxvi16ger2spp(&vq, vc, vc, 0, 0, 0);
433 *((__vector_quad *)resp) = vq;
444 __vector_quad vq = *((__vector_quad *)vqp);
446 __builtin_mma_xvf16ger2pp(&vq, vc, vc);
447 *((__vector_quad *)resp) = vq;
458 __vector_quad vq = *((__vector_quad *)vqp);
460 __builtin_mma_xvf16ger2pn(&vq, vc, vc);
461 *((__vector_quad *)resp) = vq;
472 __vector_quad vq = *((__vector_quad *)vqp);
474 __builtin_mma_xvf16ger2np(&vq, vc, vc);
475 *((__vector_quad *)resp) = vq;
486 __vector_quad vq = *((__vector_quad *)vqp);
488 __builtin_mma_xvf16ger2nn(&vq, vc, vc);
489 *((__vector_quad *)resp) = vq;
500 __vector_quad vq = *((__vector_quad *)vqp);
502 __builtin_mma_pmxvf16ger2pp(&vq, vc, vc, 0, 0, 0);
503 *((__vector_quad *)resp) = vq;
514 __vector_quad vq = *((__vector_quad *)vqp);
516 __builtin_mma_pmxvf16ger2pn(&vq, vc, vc, 0, 0, 0);
517 *((__vector_quad *)resp) = vq;
528 __vector_quad vq = *((__vector_quad *)vqp);
530 __builtin_mma_pmxvf16ger2np(&vq, vc, vc, 0, 0, 0);
531 *((__vector_quad *)resp) = vq;
542 __vector_quad vq = *((__vector_quad *)vqp);
544 __builtin_mma_pmxvf16ger2nn(&vq, vc, vc, 0, 0, 0);
545 *((__vector_quad *)resp) = vq;
556 __vector_quad vq = *((__vector_quad *)vqp);
558 __builtin_mma_xvf32gerpp(&vq, vc, vc);
559 *((__vector_quad *)resp) = vq;
570 __vector_quad vq = *((__vector_quad *)vqp);
572 __builtin_mma_xvf32gerpn(&vq, vc, vc);
573 *((__vector_quad *)resp) = vq;
584 __vector_quad vq = *((__vector_quad *)vqp);
586 __builtin_mma_xvf32gernp(&vq, vc, vc);
587 *((__vector_quad *)resp) = vq;
598 __vector_quad vq = *((__vector_quad *)vqp);
600 __builtin_mma_xvf32gernn(&vq, vc, vc);
601 *((__vector_quad *)resp) = vq;
612 __vector_quad vq = *((__vector_quad *)vqp);
614 __builtin_mma_pmxvf32gerpp(&vq, vc, vc, 0, 0);
615 *((__vector_quad *)resp) = vq;
626 __vector_quad vq = *((__vector_quad *)vqp);
628 __builtin_mma_pmxvf32gerpn(&vq, vc, vc, 0, 0);
629 *((__vector_quad *)resp) = vq;
640 __vector_quad vq = *((__vector_quad *)vqp);
642 __builtin_mma_pmxvf32gernp(&vq, vc, vc, 0, 0);
643 *((__vector_quad *)resp) = vq;
654 __vector_quad vq = *((__vector_quad *)vqp);
656 __builtin_mma_pmxvf32gernn(&vq, vc, vc, 0, 0);
657 *((__vector_quad *)resp) = vq;
669 __vector_quad vq = *((__vector_quad *)vqp);
671 __builtin_mma_xvf64gerpp(&vq, vp, vc);
672 *((__vector_quad *)resp) = vq;
684 __vector_quad vq = *((__vector_quad *)vqp);
686 __builtin_mma_xvf64gerpn(&vq, vp, vc);
687 *((__vector_quad *)resp) = vq;
699 __vector_quad vq = *((__vector_quad *)vqp);
701 __builtin_mma_xvf64gernp(&vq, vp, vc);
702 *((__vector_quad *)resp) = vq;
714 __vector_quad vq = *((__vector_quad *)vqp);
716 __builtin_mma_xvf64gernn(&vq, vp, vc);
717 *((__vector_quad *)resp) = vq;
729 __vector_quad vq = *((__vector_quad *)vqp);
731 __builtin_mma_pmxvf64gerpp(&vq, vp, vc, 0, 0);
732 *((__vector_quad *)resp) = vq;
744 __vector_quad vq = *((__vector_quad *)vqp);
746 __builtin_mma_pmxvf64gerpn(&vq, vp, vc, 0, 0);
747 *((__vector_quad *)resp) = vq;
759 __vector_quad vq = *((__vector_quad *)vqp);
761 __builtin_mma_pmxvf64gernp(&vq, vp, vc, 0, 0);
762 *((__vector_quad *)resp) = vq;
774 __vector_quad vq = *((__vector_quad *)vqp);
776 __builtin_mma_pmxvf64gernn(&vq, vp, vc, 0, 0);
777 *((__vector_quad *)resp) = vq;
787 __vector_quad vq = *((__vector_quad *)vqp);
789 __builtin_mma_xvbf16ger2(&vq, vc, vc);
790 *((__vector_quad *)resp) = vq;
800 __vector_quad vq = *((__vector_quad *)vqp);
802 __builtin_mma_pmxvbf16ger2(&vq, vc, vc, 0, 0, 0);
803 *((__vector_quad *)resp) = vq;
814 __vector_quad vq = *((__vector_quad *)vqp);
816 __builtin_mma_xvbf16ger2pp(&vq, vc, vc);
817 *((__vector_quad *)resp) = vq;
828 __vector_quad vq = *((__vector_quad *)vqp);
830 __builtin_mma_xvbf16ger2pn(&vq, vc, vc);
831 *((__vector_quad *)resp) = vq;
842 __vector_quad vq = *((__vector_quad *)vqp);
844 __builtin_mma_xvbf16ger2np(&vq, vc, vc);
845 *((__vector_quad *)resp) = vq;
856 __vector_quad vq = *((__vector_quad *)vqp);
858 __builtin_mma_xvbf16ger2nn(&vq, vc, vc);
859 *((__vector_quad *)resp) = vq;
870 __vector_quad vq = *((__vector_quad *)vqp);
872 __builtin_mma_pmxvbf16ger2pp(&vq, vc, vc, 0, 0, 0);
873 *((__vector_quad *)resp) = vq;
884 __vector_quad vq = *((__vector_quad *)vqp);
886 __builtin_mma_pmxvbf16ger2pn(&vq, vc, vc, 0, 0, 0);
887 *((__vector_quad *)resp) = vq;
898 __vector_quad vq = *((__vector_quad *)vqp);
900 __builtin_mma_pmxvbf16ger2np(&vq, vc, vc, 0, 0, 0);
901 *((__vector_quad *)resp) = vq;
912 __vector_quad vq = *((__vector_quad *)vqp);
914 __builtin_mma_pmxvbf16ger2nn(&vq, vc, vc, 0, 0, 0);
915 *((__vector_quad *)resp) = vq;
1017 __vector_quad vq = *((__vector_quad *)vqp);
1019 __builtin_mma_pmxvf64gernn(&vq, vp, vc, 0, 0);
1020 *((__vector_quad *)resp) = vq;
1032 __vector_quad vq = *((__vector_quad *)vqp);
1034 __builtin_mma_xvf64gernp(&vq, vp, vc);
1035 *((__vector_quad *)resp) = vq;
1048 __vector_quad vq = *((__vector_quad *)vqp);
1050 __builtin_mma_xvf64gernp(&vq, vp, vc);
1051 *((__vector_quad *)resp) = vq;
1061 __vector_quad vq = *((__vector_quad *)vqp);
1182 __vector_quad vq = *((__vector_quad *)vqp);
1184 __builtin_mma_pmxvf64gernn(&vq, vp, vc, 0, 0);
1185 *((__vector_quad *)resp) = vq;
1197 __vector_quad vq = *((__vector_quad *)vqp);
1199 __builtin_mma_xvf64gernp(&vq, vp, vc);
1200 *((__vector_quad *)resp) = vq;
1213 __vector_quad vq = *((__vector_quad *)vqp);
1215 __builtin_mma_xvf64gernp(&vq, vp, vc);
1216 *((__vector_quad *)resp) = vq;