Commit
Fix (notebooks): remove double parenthesis and other style fixes
fabianandresgrob committed Feb 19, 2024
1 parent f379925 commit f1b04a5
Showing 3 changed files with 52 additions and 152 deletions.
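
For reference, the pattern shared by the three notebook diffs below is sketched here: the small assert_with_message helper that this commit adds to Brevitas_TVMCon2021.ipynb, called on a bare boolean instead of a redundant (... == True) comparison. This is a minimal, self-contained sketch, not the notebooks' full code; the is_valid flag is a hypothetical stand-in for attributes such as relu_out_tensor.is_valid.

# Helper added by this commit: assert a condition and print it, so a notebook
# cell fails loudly on a regression and otherwise shows "True" in its output.
def assert_with_message(condition):
    assert condition
    print(condition)

is_valid = True  # hypothetical stand-in for e.g. relu_out_tensor.is_valid

# Before the style fix: assert_with_message((is_valid == True))  # double parentheses, redundant comparison
assert_with_message(is_valid)  # prints: True
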
2 changes: 1 addition & 1 deletion notebooks/02_quant_activation_overview.ipynb
@@ -343,7 +343,7 @@
"source": [
"return_disabled_quant_relu = QuantReLU(act_quant=None, return_quant_tensor=True)\n",
"relu_out_tensor = return_disabled_quant_relu(out_tensor)\n",
"assert_with_message((relu_out_tensor.is_valid==True))\n",
"assert_with_message(relu_out_tensor.is_valid)\n",
"assert_with_message(relu_out_tensor.scale == out_tensor.scale)\n",
"assert_with_message(relu_out_tensor.zero_point == out_tensor.zero_point)\n",
"assert_with_message(relu_out_tensor.bit_width == out_tensor.bit_width)"
34 changes: 18 additions & 16 deletions notebooks/Brevitas_TVMCon2021.ipynb
@@ -73,6 +73,11 @@
"from brevitas.nn import QuantLinear\n",
"from IPython.display import Markdown, display\n",
"\n",
"# helpers\n",
"def assert_with_message(condition):\n",
" assert condition\n",
" print(condition)\n",
"\n",
"def pretty_print_source(source):\n",
" display(Markdown('```python\\n' + source + '\\n```'))\n",
" \n",
@@ -866,7 +871,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_61612/661358273.py:7: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
"/tmp/ipykernel_86658/661358273.py:7: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
" quant_output = torch.tanh(quant_input)\n"
]
}
@@ -919,9 +924,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_61612/3932472163.py:8: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
"/tmp/ipykernel_86658/3932472163.py:8: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
" train_mode_cat = torch.cat([quant_identity(float_inp1), quant_identity(float_inp2)], dim=1)\n",
"/tmp/ipykernel_61612/3932472163.py:14: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
"/tmp/ipykernel_86658/3932472163.py:14: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
" eval_mode_cat = torch.cat([eval_quant_inp1, eval_quant_inp2], dim=1)\n"
]
}
@@ -1813,14 +1818,11 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
@@ -1836,7 +1838,7 @@
"quant_identity_bit_width = quant_identity.act_quant.fused_activation_quant_proxy.tensor_quant.msb_clamp_bit_width_impl\n",
"quant_linear_bit_width = quant_linear.weight_quant.tensor_quant.msb_clamp_bit_width_impl\n",
"\n",
"quant_identity_bit_width is quant_linear_bit_width"
"assert_with_message(quant_identity_bit_width is quant_linear_bit_width)"
]
},
{
@@ -2160,7 +2162,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7f8f925ffcd0>"
"<IPython.lib.display.IFrame at 0x7fb23f9e1550>"
]
},
"execution_count": 40,
@@ -2555,7 +2557,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7f8f9255ff10>"
"<IPython.lib.display.IFrame at 0x7fb208193710>"
]
},
"execution_count": 42,
@@ -2801,7 +2803,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7f8f925ee7d0>"
"<IPython.lib.display.IFrame at 0x7fb208174650>"
]
},
"execution_count": 44,
@@ -2895,7 +2897,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7f8f92604090>"
"<IPython.lib.display.IFrame at 0x7fb208192e50>"
]
},
"execution_count": 46,
168 changes: 33 additions & 135 deletions notebooks/quantized_recurrent.ipynb
@@ -146,18 +146,15 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
"quant_rnn_0_left_to_right.gate_params.input_weight.weight_quant is quant_rnn_1_right_to_left.gate_params.input_weight.weight_quant"
"assert_with_message(not quant_rnn_0_left_to_right.gate_params.input_weight.weight_quant is quant_rnn_1_right_to_left.gate_params.input_weight.weight_quant)"
]
},
{
@@ -166,18 +163,15 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
"quant_rnn_0_left_to_right.cell.gate_acc_quant is quant_rnn_1_right_to_left.cell.gate_acc_quant"
"assert_with_message(not quant_rnn_0_left_to_right.cell.gate_acc_quant is quant_rnn_1_right_to_left.cell.gate_acc_quant)"
]
},
{
@@ -186,18 +180,15 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
"quant_rnn_0_left_to_right.gate_params.bias_quant is quant_rnn_1_right_to_left.gate_params.bias_quant"
"assert_with_message(not quant_rnn_0_left_to_right.gate_params.bias_quant is quant_rnn_1_right_to_left.gate_params.bias_quant)"
]
},
{
@@ -214,18 +205,15 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
"quant_rnn_0_left_to_right.io_quant is quant_rnn_1_right_to_left.io_quant"
"assert_with_message(quant_rnn_0_left_to_right.io_quant is quant_rnn_1_right_to_left.io_quant)"
]
},
{
@@ -966,7 +954,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc849b718d0>"
"<IPython.lib.display.IFrame at 0x7fd09064fb90>"
]
},
"execution_count": 19,
@@ -995,7 +983,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-15 10:20:15.222259670 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_93 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
"2024-02-19 02:23:40.184678458 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_93 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
]
}
],
@@ -1042,37 +1030,7 @@
"skip-execution"
]
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Serving 'quant_lstm_weight_only_cifg_4b.onnx' at http://localhost:8082\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"100%\"\n",
" height=\"400\"\n",
" src=\"http://localhost:8082/\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" \n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc84aae6750>"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"show_netron(export_path, 8082)"
]
@@ -1094,7 +1052,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-15 10:20:22.930760716 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
"2024-02-19 02:23:47.615422725 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
]
}
],
@@ -1150,37 +1108,7 @@
"skip-execution"
]
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Serving 'quant_lstm_weight_only_bidirectional_2_layers.onnx' at http://localhost:8083\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"100%\"\n",
" height=\"400\"\n",
" src=\"http://localhost:8083/\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" \n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc848d0a810>"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"show_netron(export_path, 8083)"
]
@@ -1226,37 +1154,7 @@
"skip-execution"
]
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Serving 'quant_lstm_weight_only_bidirectional_2_layers_shared_ih.onnx' at http://localhost:8085\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"100%\"\n",
" height=\"400\"\n",
" src=\"http://localhost:8085/\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" \n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc848d08f10>"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"show_netron(export_path, 8085)"
]
@@ -1326,7 +1224,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc96dabe810>"
"<IPython.lib.display.IFrame at 0x7fd090675e50>"
]
},
"execution_count": 29,
@@ -1404,7 +1302,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc848d0e790>"
"<IPython.lib.display.IFrame at 0x7fd08b279090>"
]
},
"execution_count": 31,
@@ -1482,7 +1380,7 @@
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7fc848836410>"
"<IPython.lib.display.IFrame at 0x7fd08b255c50>"
]
},
"execution_count": 33,
