
Commit c968ab1

Able to run quantized model but outputs are wrong
1 parent 8561198 commit c968ab1


3 files changed: +47 -3 lines changed


samples/modules/executorch/arm-fvp-tutorials/models/mv2/prj.conf

Lines changed: 3 additions & 3 deletions
@@ -27,7 +27,7 @@ CONFIG_CBPRINTF_FP_SUPPORT=y
 CONFIG_CODE_DATA_RELOCATION=y

 # Add model specific configs, mv2 needs at least ~11 MB, quantized needs 12MB?
-CONFIG_EXECUTORCH_METHOD_ALLOCATOR_POOL_SIZE=11534336
-CONFIG_EXECUTORCH_INCLUDE_ARM_QUANTIZE_LIBS=n
-#CONFIG_EXECUTORCH_METHOD_ALLOCATOR_POOL_SIZE=12582912
+#CONFIG_EXECUTORCH_METHOD_ALLOCATOR_POOL_SIZE=11534336
+CONFIG_EXECUTORCH_INCLUDE_ARM_QUANTIZE_LIBS=y
+CONFIG_EXECUTORCH_METHOD_ALLOCATOR_POOL_SIZE=12582912

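A note on the numbers in that hunk: both pool sizes are exact mebibyte counts, which is what the "~11 MB" / "12MB?" comment in prj.conf refers to. A small compile-time sketch of that arithmetic (the kMiB constant and the static_asserts are illustrative only, not part of the sample code):

#include <cstddef>

// Illustrative check only: the pool sizes set in prj.conf are whole MiB.
constexpr std::size_t kMiB = 1024u * 1024u;  // 1 MiB = 1048576 bytes
static_assert(11 * kMiB == 11534336, "old pool size, used for the float mv2 model");
static_assert(12 * kMiB == 12582912, "new pool size, tried for the quantized model");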
samples/modules/executorch/arm-fvp-tutorials/models/mv2/src/mv2_output_verifier.cpp

Lines changed: 42 additions & 0 deletions
@@ -120,6 +120,48 @@ MV2OutputVerifier::_get_class(size_t idx, char *name_buf, size_t buf_len) {
     }
   }
 }
+void
+MV2OutputVerifier::_print(std::vector<executorch::runtime::EValue>& outputs) {
+  // Print the outputs.
+  ET_LOG(Info, "Printing outputs.");
+  for (int i = 0; i < outputs.size(); ++i) {
+    if (outputs[i].isTensor()) {
+      executorch::aten::Tensor tensor = outputs[i].toTensor();
+      // The output might be collected and parsed so printf() is used instead
+      // of ET_LOG() here
+      for (int j = 0; j < tensor.numel(); ++j) {
+        if (tensor.scalar_type() == executorch::aten::ScalarType::Int) {
+          printf(
+              "Output[%d][%d]: (int) %d\n",
+              i,
+              j,
+              tensor.const_data_ptr<int>()[j]);
+        } else if (tensor.scalar_type() == executorch::aten::ScalarType::Float) {
+          printf(
+              "Output[%d][%d]: (float) %f\n",
+              i,
+              j,
+              tensor.const_data_ptr<float>()[j]);
+        } else if (tensor.scalar_type() == executorch::aten::ScalarType::Char) {
+          printf(
+              "Output[%d][%d]: (char) %d\n",
+              i,
+              j,
+              tensor.const_data_ptr<int8_t>()[j]);
+        } else if (tensor.scalar_type() == executorch::aten::ScalarType::Bool) {
+          printf(
+              "Output[%d][%d]: (bool) %s (0x%x)\n",
+              i,
+              j,
+              tensor.const_data_ptr<int8_t>()[j] ? "true " : "false",
+              tensor.const_data_ptr<int8_t>()[j]);
+        }
+      }
+    } else {
+      printf("Output[%d]: Not Tensor\n", i);
+    }
+  }
+}

 int
 MV2OutputVerifier::verify(std::vector<executorch::runtime::EValue>& outputs) {

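Nothing in this commit calls the new _print() helper yet. Below is a minimal sketch of how it could be wired into the existing verify() entry point to dump the raw output tensors while the wrong quantized results are being chased down; the call site, the return value, and the comment about the remaining steps are assumptions, not part of this diff:

#include "mv2_output_verifier.hpp"

int
MV2OutputVerifier::verify(std::vector<executorch::runtime::EValue>& outputs) {
  // Assumed debugging hook: print every element of every output tensor over
  // printf() so the FVP console log can be compared against a host reference.
  _print(outputs);

  // The existing checks (softmax, top-k, class lookup) would continue here.
  return 0;
}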
samples/modules/executorch/arm-fvp-tutorials/models/mv2/src/mv2_output_verifier.hpp

Lines changed: 2 additions & 0 deletions
@@ -17,8 +17,10 @@ class MV2OutputVerifier : public OutputVerifier {
   int verify(std::vector<executorch::runtime::EValue>& outputs);
 private:
   float _max(std::vector<executorch::runtime::EValue>& outputs);
+  void _print(std::vector<executorch::runtime::EValue>& outputs);
   void _softmax_in_place(std::vector<executorch::runtime::EValue>& outputs, float max_val);
   void _top_k(std::vector<executorch::runtime::EValue>& in_probs, size_t k, std::vector<float>& out_probs, std::vector<size_t>& out_idxs);
   void _get_class(size_t idx, char *name_buf, size_t buf_len);
+
 };
 #endif //__MV2_OUTPUT_VERIFICATION_H__
