Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions examples/arm/aot_arm_compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,19 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):
can_delegate = True


class QuantLinearTest(torch.nn.Module):
    """Tiny single-layer model used to exercise the quantized linear path.

    Deliberately uses non-square, non-power-of-two feature sizes (61 -> 37)
    so shape handling in the quantization flow is actually stressed.
    """

    def __init__(self):
        super().__init__()
        # One fully-connected layer: 61 input features mapped to 37 outputs.
        self.linear = torch.nn.Linear(61, 37)

    def forward(self, x):
        # y = x @ W.T + b for a batch of feature vectors.
        return self.linear(x)

    # Representative input: batch of 8 float32 feature vectors.
    example_input = (torch.randn([8, 61], dtype=torch.float32),)
    # Marks this model as delegatable to the backend under test.
    can_delegate = True


models = {
"add": AddModule,
"add2": AddModule2,
Expand All @@ -306,6 +319,9 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):
"qops": QuantOpTest,
"softmax": SoftmaxModule,
"MultipleOutputsModule": MultipleOutputsModule,
# TODO: Remove this from here, once we have dedicated MCU test pipeline ready. This is an interim solution.
# See https://github.com/pytorch/executorch/discussions/13944
"qlinear": QuantLinearTest,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Add a TODO for more tests, link the tester RFC thing from @AdrianLundell here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done

}

calibration_data = {
Expand All @@ -330,6 +346,7 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):
torch.randn(32, 2, 1) * 1000,
),
"softmax": (torch.randn(32, 2, 2),),
"qlinear": (torch.randn(37, 61),),
}

evaluators = {
Expand Down
5 changes: 3 additions & 2 deletions examples/arm/run_mcu_models_fvp.sh
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@ VALID_TARGETS=(
)

# Default models for MCU validation with portable kernels
DEFAULT_MODELS=(mv2 mv3 lstm)
DEFAULT_MODELS=(mv2 mv3 lstm qadd qlinear)
# Available models (on FVP)
AVAILABLE_MODELS=(mv2 mv3 lstm)
AVAILABLE_MODELS=(mv2 mv3 lstm qadd qlinear)
# Add the following models if you want to enable them later (atm they are not working on FVP)
# edsr w2l ic3 ic4 resnet18 resnet50

Expand Down Expand Up @@ -257,6 +257,7 @@ for model in "${MODELS[@]}"; do
-m "$model" \
--target="$ETHOS_TARGET" \
--quantize \
--enable_qdq_fusion_pass \
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

always enable?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Few internal CI tests were failing, I will fix them first and then enable

--output="arm_test/$model"; then
echo "❌ AOT compilation failed for $model"
MODEL_SUCCESS=false
Expand Down
Loading