-
Notifications
You must be signed in to change notification settings - Fork 975
NXP backend: Test max_pool2d with new Neutron flow.
#19272
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -6,7 +6,6 @@ | |
| import operator | ||
|
|
||
| import numpy as np | ||
| import pytest | ||
| import torch | ||
|
|
||
| from executorch.backends.nxp.backend.edge_program_converter import ( | ||
|
|
@@ -19,7 +18,13 @@ | |
| ToChannelFirstPreprocess, | ||
| ToChannelLastPreprocess, | ||
| ) | ||
| from executorch.backends.nxp.tests.graph_verifier import ( | ||
| BaseGraphVerifier, | ||
| NonDelegatedNode, | ||
| ) | ||
| from executorch.backends.nxp.tests.nsys_testing import lower_run_compare | ||
| from executorch.backends.nxp.tests.use_qat import * # noqa F403 | ||
| import pytest | ||
|
|
||
| # noinspection PyProtectedMember | ||
| from executorch.exir.dialects._ops import ops as exir_ops | ||
|
|
@@ -47,7 +52,7 @@ def forward(self, x): | |
|
|
||
|
|
||
| class MaxPool2dModule(torch.nn.Module): | ||
| def __init__(self, kernel_size=3, **kwargs): | ||
| def __init__(self, kernel_size: int | tuple[int, ...] = 3, **kwargs): | ||
| super().__init__() | ||
| self.max_pool2d = torch.nn.MaxPool2d(kernel_size, **kwargs) | ||
|
|
||
|
|
@@ -250,3 +255,124 @@ def test_max_pool_2d__from_1d(self, mocker): | |
| tflite_input_preprocess=ToChannelLastPreprocess(), | ||
| tflite_output_preprocess=ToChannelFirstPreprocess(), | ||
| ) | ||
|
|
||
|
|
||
| class TestMaxPool2DNewNeutronFlow: | ||
| # noinspection PyMethodMayBeStatic | ||
| def assert_delegated(self, model, input_shape): | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I would move these methods to a separate file so they can be used in other tests as well. When I start implementing new neutron flow tests, I would have to copy this into my test suite and that seems unnecessary.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. These functions are not universal. This one checks for 1 delegated node and no non-delegated nodes. For example MaxPool1D tests have it different. |
||
| graph_verifier = BaseGraphVerifier( | ||
| exp_num_delegate_call_nodes=1, # Delegated MaxPool. | ||
| exp_non_delegated_nodes=[], | ||
| ) | ||
|
|
||
| lower_run_compare( | ||
| model, input_shape, graph_verifier, use_new_flow_neutron_c=True | ||
| ) | ||
|
|
||
| # noinspection PyMethodMayBeStatic | ||
| def assert_not_delegated(self, model, input_shape): | ||
|
MartinPavella marked this conversation as resolved.
|
||
| delegated_ep = to_quantized_edge_program( | ||
| model, input_shape, use_new_flow_neutron_c=True | ||
| ).exported_program() | ||
|
|
||
| # Make sure the `max_pool2d` was NOT delegated. | ||
| assert not graph_contains_any_of_ops( | ||
| delegated_ep.graph, [ExecutorchDelegateCall] | ||
| ) | ||
| assert graph_contains_any_of_ops(delegated_ep.graph, [MaxPool2D]) | ||
|
|
||
| def test__basic_nsys_inference(self): | ||
| input_shape = (2, 4, 6, 7) # The old flow limited the batch size to 1. | ||
| model = MaxPool2dModule() | ||
| self.assert_delegated(model, input_shape) | ||
|
|
||
| def test__kernel_size_limit(self): | ||
| kernel_size = (1, 4096) | ||
| input_shape = (1, 4) + kernel_size | ||
|
MartinPavella marked this conversation as resolved.
|
||
| model = MaxPool2dModule(kernel_size) | ||
| self.assert_delegated(model, input_shape) | ||
|
|
||
| def test__kernel_size_limit_exceeded(self): | ||
| kernel_size = (1, 4097) # Exceeds the kernel size limit. | ||
| input_shape = (1, 4) + kernel_size | ||
| model = MaxPool2dModule(kernel_size) | ||
| self.assert_not_delegated(model, input_shape) | ||
|
|
||
| def test__stride_limit__no_padding(self): | ||
| stride = 4096 | ||
| input_shape = (1, 4, 1, 4096) | ||
| model = MaxPool2dModule(1, stride=stride) | ||
| self.assert_delegated(model, input_shape) | ||
|
|
||
| def test__stride_limit_exceeded__no_padding(self): | ||
| stride = 4097 # Exceeds the stride limit. | ||
| input_shape = (1, 4, 1, 4096) | ||
| model = MaxPool2dModule(1, stride=stride) | ||
| self.assert_not_delegated(model, input_shape) | ||
|
|
||
| def test__stride_limit__padding(self): | ||
| padding = 1 | ||
| stride = 4096 | ||
| input_shape = (1, 2, 3, stride) | ||
| model = MaxPool2dModule(3, stride=stride, padding=padding) | ||
| self.assert_delegated(model, input_shape) | ||
|
|
||
| def test__stride_limit_exceeded__padding(self): | ||
| padding = 1 | ||
| stride = 4097 # Exceeds the stride limit. | ||
| input_shape = (1, 2, 3, stride) | ||
| model = MaxPool2dModule(3, stride=stride, padding=padding) | ||
| self.assert_not_delegated(model, input_shape) | ||
|
|
||
| @pytest.mark.skip( | ||
| reason="Large padding requires large kernel size which results in an extremely slow test." | ||
| ) | ||
| def test__padding_limit(self): | ||
| # As the padding is added via a `Pad` operator (not the `MaxPool` arguments), there is no limit to the padded | ||
| # value. But as padding can be at most half of the kernel size (PyTorch requirement) and kernel size is limited | ||
| # to 4096, padding of 2048 is the limit. | ||
| padding = 2048 | ||
| kernel_size = padding * 2 | ||
| input_shape = (1, 1, 2, 3) | ||
| model = MaxPool2dModule(kernel_size, padding=padding) | ||
| self.assert_delegated(model, input_shape) | ||
|
|
||
| def test__padding__max_pool_limit_exceeded(self): | ||
| # NeutronIR `MaxPool` padding is limited to 32. But as it is added by the `Pad` operator instead, there is no | ||
| # limit. This test ensures the `MaxPool` padding limit is not a problem. | ||
| padding = 33 | ||
| kernel_size = padding * 2 | ||
| input_shape = (1, 2, 3, 4) | ||
| model = MaxPool2dModule(kernel_size, padding=padding) | ||
| self.assert_delegated(model, input_shape) | ||
|
|
||
| def test__padding_to_kernel_ratio_exceeded(self): | ||
| # Both PyTorch and Neutron require the padding to be at most half of the kernel size. | ||
| kernel_size = 3 | ||
| padding = 2 # More than half of the kernel size. | ||
| input_shape = (1, 2, 3, 4) | ||
| model = MaxPool2dModule(kernel_size, padding=padding) | ||
| with pytest.raises( | ||
| RuntimeError, match="pad should be at most half of effective kernel size" | ||
| ): | ||
| to_quantized_edge_program(model, input_shape, use_new_flow_neutron_c=True) | ||
|
|
||
|
|
||
| class TestMaxPool1DNewNeutronFlow: | ||
|
|
||
| # Just a basic test to verify that the operator gets extended to the 2D variant correctly. | ||
| def test__basic_nsys_inference__view_not_delegated(self): | ||
| input_shape = (2, 4, 6) # The old flow limited the batch size to 1. | ||
| model = MaxPool1DModule() | ||
| graph_verifier = BaseGraphVerifier( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I would add checking for the
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not with the |
||
| exp_num_delegate_call_nodes=1, # Delegated MaxPool. | ||
| exp_non_delegated_nodes=[ | ||
| NonDelegatedNode( | ||
| "aten_view_copy_default", 2 | ||
| ) # Non delegated due to shape requirements. | ||
| ], | ||
| ) | ||
|
|
||
| lower_run_compare( | ||
| model, input_shape, graph_verifier, use_new_flow_neutron_c=True | ||
| ) | ||
Uh oh!
There was an error while loading. Please reload this page.