@@ -721,12 +721,17 @@ class vfnrclip : public function_base

   rtx expand (function_expander &e) const override
   {
-    if (e.op_info->op == OP_TYPE_f_qf)
-      {
-        return e.use_exact_insn (
+    if (e.op_info->op == OP_TYPE_x_f_qf)
+      {
+        return e.use_exact_insn (
           code_for_pred_fnr_clip (ZERO_EXTEND, e.vector_mode ()));
-        gcc_unreachable ();
-      }
+      }
+    if (e.op_info->op == OP_TYPE_xu_f_qf)
+      {
+        return e.use_exact_insn (
+          code_for_pred_fnr_clip_scalar (ZERO_EXTEND, e.vector_mode ()));
+      }
+    gcc_unreachable ();
   }
 };

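The hunk above splits the single OP_TYPE_f_qf case into separate x_f_qf and xu_f_qf cases, routes each one to its own insn generator, and moves gcc_unreachable () out of the taken branch so it only guards genuinely unhandled operand suffixes. Below is a minimal standalone sketch of that dispatch shape; the enum values, gen_fnr_clip, gen_fnr_clip_scalar, and expand_vfnrclip are hypothetical stand-ins, not GCC's expander API.

// Toy imitation of the expand () dispatch above: pick a generator based on
// the operand-type suffix, fall through to an "unreachable" assertion.
#include <cassert>
#include <cstdio>

enum op_type { OP_TYPE_x_f_qf, OP_TYPE_xu_f_qf, OP_TYPE_other };

// Stand-ins for code_for_pred_fnr_clip / code_for_pred_fnr_clip_scalar.
static const char *gen_fnr_clip ()        { return "pred_fnr_clip"; }
static const char *gen_fnr_clip_scalar () { return "pred_fnr_clip_scalar"; }

static const char *expand_vfnrclip (op_type op)
{
  if (op == OP_TYPE_x_f_qf)
    return gen_fnr_clip ();
  if (op == OP_TYPE_xu_f_qf)
    return gen_fnr_clip_scalar ();
  assert (0 && "unhandled operand suffix"); // mirrors gcc_unreachable ()
  return nullptr;
}

int main ()
{
  std::printf ("%s\n", expand_vfnrclip (OP_TYPE_x_f_qf));
  std::printf ("%s\n", expand_vfnrclip (OP_TYPE_xu_f_qf));
}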
@@ -908,10 +913,10 @@ class vqmacc : public function_base
   {
     if (e.op_info->op == OP_TYPE_4x8x4)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plus_qoq (SIGN_EXTEND, e.vector_mode ()));
+        code_for_pred_matrix_mul_plus_qoq (SIGN_EXTEND, e.vector_mode ()));
     if (e.op_info->op == OP_TYPE_2x8x2)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plus_dod (SIGN_EXTEND, e.vector_mode ()));
+        code_for_pred_matrix_mul_plus_dod (SIGN_EXTEND, e.vector_mode ()));
     gcc_unreachable ();
   }
 };
@@ -930,10 +935,10 @@ class vqmaccu : public function_base
   {
     if (e.op_info->op == OP_TYPE_4x8x4)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plus_qoq (ZERO_EXTEND, e.vector_mode ()));
+        code_for_pred_matrix_mul_plus_qoq (ZERO_EXTEND, e.vector_mode ()));
     if (e.op_info->op == OP_TYPE_2x8x2)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plus_dod (SIGN_EXTEND, e.vector_mode ()));
+        code_for_pred_matrix_mul_plus_dod (SIGN_EXTEND, e.vector_mode ()));
     gcc_unreachable ();
   }
 };
@@ -952,10 +957,10 @@ class vqmaccsu : public function_base
   {
     if (e.op_info->op == OP_TYPE_4x8x4)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plussu_qoq (e.vector_mode ()));
+        code_for_pred_matrix_mul_plussu_qoq (e.vector_mode ()));
     if (e.op_info->op == OP_TYPE_2x8x2)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plussu_dod (e.vector_mode ()));
+        code_for_pred_matrix_mul_plussu_dod (e.vector_mode ()));
     gcc_unreachable ();
   }
 };
@@ -974,10 +979,10 @@ class vqmaccus : public function_base
   {
     if (e.op_info->op == OP_TYPE_4x8x4)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plusus_qoq (e.vector_mode ()));
+        code_for_pred_matrix_mul_plusus_qoq (e.vector_mode ()));
     if (e.op_info->op == OP_TYPE_2x8x2)
       return e.use_widen_ternop_insn (
-        code_for_pred_quad_mul_plusus_dod (e.vector_mode ()));
+        code_for_pred_matrix_mul_plusus_dod (e.vector_mode ()));
     gcc_unreachable ();
   }
 };
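The four vqmacc* hunks above are pure renames of the generator entry points (quad_mul_plus* to matrix_mul_plus*); the classes still differ only in the signedness mix handed to those generators (SIGN_EXTEND, ZERO_EXTEND, or the su/us forms). The following is a hedged scalar sketch of that signedness mix, assuming 8-bit operands widened into a 32-bit accumulator; widening_mac is a made-up reference helper, not anything in GCC and not the exact SiFive ISA semantics.

// Scalar illustration of a widening multiply-accumulate where each 8-bit
// operand is extended as signed or unsigned before the 32-bit accumulation.
#include <cstdint>
#include <cstdio>

static int32_t widening_mac (const int8_t *a, const int8_t *b, int k,
                             bool a_signed, bool b_signed, int32_t acc)
{
  for (int i = 0; i < k; i++)
    {
      int32_t x = a_signed ? (int32_t) a[i] : (int32_t) (uint8_t) a[i];
      int32_t y = b_signed ? (int32_t) b[i] : (int32_t) (uint8_t) b[i];
      acc += x * y; // widened product accumulated into acc
    }
  return acc;
}

int main ()
{
  int8_t a[4] = { -1, 2, -3, 4 };
  int8_t b[4] = { 5, -6, 7, -8 };
  // vqmacc: both signed; vqmaccu: both unsigned;
  // vqmaccsu / vqmaccus: one signed and one unsigned operand.
  std::printf ("ss=%d uu=%d su=%d us=%d\n",
               (int) widening_mac (a, b, 4, true, true, 0),
               (int) widening_mac (a, b, 4, false, false, 0),
               (int) widening_mac (a, b, 4, true, false, 0),
               (int) widening_mac (a, b, 4, false, true, 0));
}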
@@ -2682,8 +2687,8 @@ static CONSTEXPR const sat_op<UNSPEC_VSSRL> vssrl_obj;
 static CONSTEXPR const sat_op<UNSPEC_VSSRA> vssra_obj;
 static CONSTEXPR const vnclip<UNSPEC_VNCLIP> vnclip_obj;
 static CONSTEXPR const vnclip<UNSPEC_VNCLIPU> vnclipu_obj;
-static CONSTEXPR const vfnrclip x_obj;
-static CONSTEXPR const vfnrclip xu_obj;
+static CONSTEXPR const vfnrclip sf_vfnrclip_x_obj;
+static CONSTEXPR const vfnrclip sf_vfnrclip_xu_obj;
 static CONSTEXPR const mask_logic<AND> vmand_obj;
 static CONSTEXPR const mask_nlogic<AND> vmnand_obj;
 static CONSTEXPR const mask_notlogic<AND> vmandn_obj;
@@ -2873,10 +2878,10 @@ static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXB> vs
 static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXH> vsuxh_obj;
 static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXW> vsuxw_obj;
 static CONSTEXPR const th_extract vext_x_v_obj;
-static CONSTEXPR const vqmacc vqmacc_obj;
-static CONSTEXPR const vqmaccu vqmaccu_obj;
-static CONSTEXPR const vqmaccsu vqmaccsu_obj;
-static CONSTEXPR const vqmaccsu vqmaccus_obj;
+static CONSTEXPR const vqmacc sf_vqmacc_obj;
+static CONSTEXPR const vqmaccu sf_vqmaccu_obj;
+static CONSTEXPR const vqmaccsu sf_vqmaccsu_obj;
+static CONSTEXPR const vqmaccsu sf_vqmaccus_obj;

 /* Crypto Vector */
 static CONSTEXPR const vandn vandn_obj;
@@ -3018,8 +3023,8 @@ BASE (vssra)
 BASE (vssrl)
 BASE (vnclip)
 BASE (vnclipu)
-BASE (x)
-BASE (xu)
+BASE (sf_vfnrclip_x)
+BASE (sf_vfnrclip_xu)
 BASE (vmand)
 BASE (vmnand)
 BASE (vmandn)
@@ -3209,10 +3214,10 @@ BASE (vsuxb)
 BASE (vsuxh)
 BASE (vsuxw)
 BASE (vext_x_v)
-BASE (vqmacc)
-BASE (vqmaccu)
-BASE (vqmaccsu)
-BASE (vqmaccus)
+BASE (sf_vqmacc)
+BASE (sf_vqmaccu)
+BASE (sf_vqmaccsu)
+BASE (sf_vqmaccus)
 /* Crypto vector */
 BASE (vandn)
 BASE (vbrev)
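The _obj declarations and the BASE () entries above have to be renamed in lockstep because the BASE macro in this file pairs each name with its NAME##_obj singleton: BASE (sf_vqmacc) only resolves once the object is literally called sf_vqmacc_obj. A self-contained imitation of that pattern follows, using a toy function_base and a simplified BASE macro rather than GCC's real definitions.

// Toy version of the name/object pairing enforced by the renames above.
#include <cstdio>

struct function_base { const char *insn; };

// Simplified stand-in for the BASE macro: token-paste NAME onto _obj.
#define BASE(NAME) \
  static const function_base *const NAME = &NAME##_obj;

static const function_base sf_vqmacc_obj     = { "pred_matrix_mul_plus_qoq" };
static const function_base sf_vfnrclip_x_obj = { "pred_fnr_clip" };

BASE (sf_vqmacc)
BASE (sf_vfnrclip_x)

int main ()
{
  std::printf ("%s %s\n", sf_vqmacc->insn, sf_vfnrclip_x->insn);
}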