24 changes: 13 additions & 11 deletions .ci/benchmarks/huggingface_models_list.txt
@@ -17,27 +17,36 @@ DistilBertForQuestionAnswering,512
DistillGPT2,32
ElectraForCausalLM,64
ElectraForQuestionAnswering,128
GoogleFnet,32
google/gemma-2-2b,8
google/gemma-3-4b-it,8
GPT2ForSequenceClassification,8
GPTJForCausalLM,1
GPTJForQuestionAnswering,1
GPTNeoForCausalLM,32
GPTNeoForSequenceClassification,32
GoogleFnet,32
LayoutLMForMaskedLM,32
LayoutLMForSequenceClassification,32
M2M100ForConditionalGeneration,64
MBartForCausalLM,8
MBartForConditionalGeneration,4
MT5ForConditionalGeneration,32
MegatronBertForCausalLM,16
MegatronBertForQuestionAnswering,16
meta-llama/Llama-3.2-1B,8
mistralai/Mistral-7B-Instruct-v0.3,8
mistralai/Mistral-7B-Instruct-v0.3,8
MobileBertForMaskedLM,256
MobileBertForQuestionAnswering,256
MT5ForConditionalGeneration,32
openai/gpt-oss-20b,8
openai/gpt-oss-20b,8
openai/whisper-tiny,8
OPTForCausalLM,4
PLBartForCausalLM,16
PLBartForConditionalGeneration,8
PegasusForCausalLM,128
PegasusForConditionalGeneration,64
PLBartForCausalLM,16
PLBartForConditionalGeneration,8
Qwen/Qwen3-0.6B,8
RobertaForCausalLM,32
RobertaForQuestionAnswering,32
T5ForConditionalGeneration,8
@@ -46,10 +55,3 @@ TrOCRForCausalLM,64
XGLMForCausalLM,32
XLNetLMHeadModel,16
YituTechConvBert,32
meta-llama/Llama-3.2-1B,8
google/gemma-2-2b,8
google/gemma-3-4b-it,8
openai/whisper-tiny,8
Qwen/Qwen3-0.6B,8
mistralai/Mistral-7B-Instruct-v0.3,8
openai/gpt-oss-20b,8
8 changes: 4 additions & 4 deletions .ci/benchmarks/timm_models_list.txt
@@ -6,9 +6,11 @@ coat_lite_mini 128
convit_base 128
convmixer_768_32 64
convnext_base 128
convnextv2_nano.fcmae_ft_in22k_in1k 128
crossvit_9_240 256
cspdarknet53 128
deit_base_distilled_patch16_224 128
deit_tiny_patch16_224.fb_in1k 128
dla102 128
dm_nfnet_f0 128
dpn107 64
@@ -56,10 +58,8 @@ tinynet_a 128
tnt_s_patch16_224 128
twins_pcpvt_base 128
visformer_small 128
vit_base_patch14_dinov2.lvd142m 128
vit_base_patch16_224 128
vit_base_patch16_siglip_256 128
volo_d1_224 128
xcit_large_24_p8_224 16
convnextv2_nano.fcmae_ft_in22k_in1k
deit_tiny_patch16_224.fb_in1k
vit_base_patch14_dinov2.lvd142m
vit_base_patch16_siglip_256
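
Both lists above pair a benchmark model with its batch size, one entry per line: the Hugging Face file uses `name,batch` while the timm file uses whitespace-separated columns (the timm entries relocated by this diff also gain an explicit batch size of 128). Below is a minimal parsing sketch under those assumptions; `load_model_list`, the paths, and the fallback batch size are illustrative, not the actual CI harness code.

```python
# Illustrative only: a small loader for the model lists shown above.
# The function name, paths, and the fallback batch size are assumptions,
# not the actual PyTorch CI benchmark harness.
from pathlib import Path


def load_model_list(path: str, default_batch: int = 1) -> dict[str, int]:
    """Parse 'name,batch' (Hugging Face list) or 'name batch' (timm list) lines."""
    models: dict[str, int] = {}
    for raw in Path(path).read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#"):
            continue  # skip blanks and comments
        # Normalize the comma-separated form to whitespace-separated columns.
        name, _, batch = line.replace(",", " ").partition(" ")
        models[name] = int(batch) if batch.strip() else default_batch
    return models


hf_models = load_model_list(".ci/benchmarks/huggingface_models_list.txt")
timm_models = load_model_list(".ci/benchmarks/timm_models_list.txt")
print(hf_models.get("google/gemma-2-2b"), timm_models.get("deit_tiny_patch16_224.fb_in1k"))
# Expected with the lists above: 8 128
```

In this dict-based sketch, an entry that appears twice in a list (e.g. mistralai/Mistral-7B-Instruct-v0.3 in the Hugging Face hunk) simply overwrites the earlier value.
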
5 changes: 4 additions & 1 deletion .ci/benchmarks/torchbench.yaml
@@ -43,6 +43,7 @@ tolerance:
    - doctr_reco_predictor
    - drq
    - phlippe_resnet
    - pytorch_CycleGAN_and_pix2pix

  higher_bf16:
    - doctr_reco_predictor
@@ -154,7 +155,7 @@ trt_not_yet_working:
skip:
  all:
    # OOMs (A100 40G)
    - detectron2_maskrcnn
    # - detectron2_maskrcnn
    # TIMEOUT, https://github.com/pytorch/pytorch/issues/98467
    - tacotron2
    # Failing in eager mode
@@ -216,6 +217,8 @@ skip:

cuda: []

xpu: []

test:
training:
- *DETECTRON2_MODELS
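
The torchbench.yaml hunks adjust the runner configuration rather than the model lists: pytorch_CycleGAN_and_pix2pix joins a tolerance list, the detectron2_maskrcnn OOM skip is commented out, and an empty xpu skip bucket is added next to cuda. Here is a hedged sketch of how such a config could be consumed with PyYAML; `should_skip` and the hard-coded path are assumptions, not the benchmark runner's actual API.

```python
# Illustrative only: reading the skip/tolerance sections of torchbench.yaml.
# Assumes PyYAML is installed and that the anchors referenced by aliases such
# as *DETECTRON2_MODELS are defined elsewhere in the full file.
import yaml

with open(".ci/benchmarks/torchbench.yaml") as f:
    cfg = yaml.safe_load(f)

skip_all = set(cfg["skip"]["all"])                  # e.g. tacotron2
higher_bf16 = set(cfg["tolerance"]["higher_bf16"])  # models needing looser bf16 tolerance


def should_skip(model: str, device: str) -> bool:
    """True if the model is skipped globally or for this device bucket."""
    per_device = cfg["skip"].get(device) or []      # e.g. the new, empty 'xpu' list
    return model in skip_all or model in per_device


print(should_skip("tacotron2", "xpu"))  # True: still in the global skip list
```
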