python - How to modify model and weight values after post-quantization in tf-lite
Problem description
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
try:
    import tensorflow.compat.v2 as tf
except Exception:
    pass
tf.enable_v2_behavior()
from tensorflow import keras
import numpy as np
import pathlib
# Train and export the model
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(28, 28)),
    keras.layers.Reshape(target_shape=(28, 28, 1)),
    keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Train the digit classification model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(
    train_images,
    train_labels,
    epochs=1,
    validation_data=(test_images, test_labels)
)
# Convert to a TensorFlow Lite model
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
# Convert using quantization
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
    for input_value in mnist_ds.take(100):
        yield [input_value]
converter.representative_dataset = representative_data_gen
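# The representative dataset above lets the converter observe typical
# activation ranges so it can choose int8 quantization parameters.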
tflite_model_quant = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant_io.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
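# Optional sanity check (a sketch, not part of the original flow): confirm
# that the io-quantized model actually takes uint8 input.
check_interp = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
check_interp.allocate_tensors()
print(check_interp.get_input_details()[0]['dtype'])  # expect numpy.uint8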
# Run the TensorFlow Lite models
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
interpreter_quant.allocate_tensors()
input_index_quant = interpreter_quant.get_input_details()[0]["index"]
output_index_quant = interpreter_quant.get_output_details()[0]["index"]
# # Load TFLite model and allocate tensors.
# interpreter = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
# interpreter.allocate_tensors()
# _________________________________ get_tensor______________________
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# get details for each layer
all_layers_details = interpreter_quant.get_tensor_details()
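# To locate the tensor indices that actually hold the conv/dense weights,
# it can help to print each tensor's metadata first (these dict keys are
# the standard ones returned by get_tensor_details):
for layer in all_layers_details:
    print(layer['index'], layer['name'], layer['shape'], layer['quantization'])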
# ___________________________________________________________________
weight1 = interpreter_quant.get_tensor(2)
weight2 = interpreter_quant.get_tensor(6)
# Example of modification: zero out weights above a threshold.
th = 0.5  # placeholder threshold, chosen only for illustration
weight1[weight1 > th] = 0  # in-place NumPy mask; an element-wise loop would not modify the array
interpreter_quant.set_tensor(all_layers_details[2]['index'], weight1)
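# NOTE: set_tensor is documented for setting *input* tensors; writing a
# weight tensor back through it, as above, is unsupported and is the
# likely cause of the SIGBUS crash described below.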
# Evaluate the models
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    # Run predictions on every image in the "test" dataset.
    prediction_digits = []
    for test_image in test_images:
        # Pre-processing: add batch dimension and convert to float32 to match with
        # the model's input data format.
        test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
        interpreter.set_tensor(input_index, test_image)
        # Run inference.
        interpreter.invoke()
        # Post-processing: remove batch dimension and find the digit with highest
        # probability.
        output = interpreter.tensor(output_index)
        digit = np.argmax(output()[0])
        prediction_digits.append(digit)
    # Compare prediction results with ground truth labels to calculate accuracy.
    accurate_count = 0
    for index in range(len(prediction_digits)):
        if prediction_digits[index] == test_labels[index]:
            accurate_count += 1
    accuracy = accurate_count * 1.0 / len(prediction_digits)
    return accuracy
print(evaluate_model(interpreter))
# NOTE: Colab runs on server CPUs, and TensorFlow Lite currently
# doesn't have super optimized server CPU kernels. So this part may be
# slower than the above float interpreter. But for mobile CPUs, considerable
# speedup can be observed.
print(evaluate_model(interpreter_quant))
I am using TensorFlow Lite to train and quantize a simple network on MNIST, following this example from the TensorFlow Lite documentation: https://www.tensorflow.org/lite/performance/post_training_integer_quant. However, I want to modify some of the weight and model values after quantization and before testing the model. I know there are two methods, "get_tensor" and "set_tensor", to read from and write to tensors, but it seems that "set_tensor" only works for loading and modifying the input. Is there any way to modify weight values in TF-Lite? Thanks for your guidance. In the code above, I read a tensor into weight1 and modified it. Then I want to assign it back to the quantized model. When I call "set_tensor" I get this error: Process finished with exit code 138 (interrupted by signal 10: SIGBUS)
Solution
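The interpreter's "set_tensor" method is documented for setting input tensors, so writing a modified weight array back through it is unsupported and can crash, which matches the SIGBUS reported above. One possible workaround, if re-running the converter is acceptable, is to edit the weights on the float Keras model with the standard Keras "get_weights"/"set_weights" calls and then repeat the post-training quantization. Below is a minimal sketch under that assumption, reusing the model, converter settings, representative_data_gen, and tflite_models_dir defined above; the threshold "th" is hypothetical, and note that editing float weights before quantization is not identical to editing the already-quantized int8 values.

# Minimal sketch (assumption: re-converting is acceptable): modify weights on
# the Keras model, then repeat the same post-training quantization as above.
th = 0.5  # hypothetical threshold, not taken from the question
new_weights = []
for w in model.get_weights():
    w = w.copy()
    w[w > th] = 0  # zero out values above the threshold
    new_weights.append(w)
model.set_weights(new_weights)

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.representative_dataset = representative_data_gen
tflite_model_quant_mod = converter.convert()
tflite_model_quant_mod_file = tflite_models_dir/"mnist_model_quant_mod.tflite"
tflite_model_quant_mod_file.write_bytes(tflite_model_quant_mod)

An alternative that preserves the already-quantized values is to edit the .tflite file directly, since it is a FlatBuffer described by the TFLite schema, but that requires working with the schema itself and is beyond this sketch.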