TensorFlow1 pb Inference


Reference: migrating_checkpoints

The following walkthrough uses the resnet_v2_50 model as an example.

First clone the models repository (tensorflow/models), then run the steps below.
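
If the slim code is not importable yet, one option is to put models/research/slim on the Python path by hand. This is only a minimal sketch; the clone location below is an assumed example, and the %cd further down achieves the same thing inside the notebook:

import sys
from pathlib import Path

# Assumed clone location of https://github.com/tensorflow/models -- adjust to your setup
slim_dir = Path.home() / "models" / "research" / "slim"

# Makes `from nets import resnet_v2` resolvable
if str(slim_dir) not in sys.path:
    sys.path.insert(0, str(slim_dir))

With that in place, start by importing TensorFlow and its TF1 compatibility API: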

import tensorflow as tf

# Prefer the TF1 compatibility API when running under TF2; fall back to `tf` itself on TF1
try:
    tf1 = tf.compat.v1
except (ImportError, AttributeError):
    tf1 = tf
2024-01-06 18:32:24.578169: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-01-06 18:32:24.722915: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2024-01-06 18:32:24.722945: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2024-01-06 18:32:24.758001: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-01-06 18:32:25.501636: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory
2024-01-06 18:32:25.501734: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory
2024-01-06 18:32:25.501747: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.

Switch to the models/research/slim directory:

%cd /media/pc/data/lxw/ai/tasks/models/research/slim
/media/pc/data/lxw/ai/tasks/models/research/slim

Image preprocessing:

import tensorflow as tf


def preprocessing(
    image,
    use_grayscale=False,
    central_fraction=0.875,
    central_crop=True,
    height=299,
    width=299,
    mean: tuple[float, ...] = (0.485, 0.456, 0.406),
    std: tuple[float, ...] = (1, 1, 1)
):
    """Slim-style preprocessing: convert to float32, optionally convert to grayscale,
    central-crop, resize to (height, width) with bilinear interpolation, then normalize."""
    if image.dtype != tf.float32:
        # convert_image_dtype also rescales integer inputs to [0, 1]
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if use_grayscale:
        image = tf.image.rgb_to_grayscale(image)
    if central_crop and central_fraction:
        image = tf.image.central_crop(image, central_fraction=central_fraction)
    if height and width:
        # resize expects a batch dimension, so add it temporarily
        image = tf.expand_dims(image, 0)
        image = tf.image.resize(image, [height, width],
                                method='bilinear',
                                preserve_aspect_ratio=False,
                                antialias=False)
        image = tf.squeeze(image, [0])
    image = tf.subtract(image, mean)
    image = tf.divide(image, std)
    return image
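
As a quick sanity check, the function can be run eagerly on a dummy image. This is just a sketch with a random uint8 array standing in for a real sample; the expected output is a (299, 299, 3) float32 tensor:

import numpy as np
import tensorflow as tf

# Hypothetical dummy input: a random HWC uint8 RGB image
dummy = np.random.randint(0, 256, size=(500, 375, 3), dtype=np.uint8)
processed = preprocessing(tf.constant(dummy))
print(processed.shape, processed.dtype)  # expected: (299, 299, 3) <dtype: 'float32'>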

Convert the ckpt checkpoint into a pb (SavedModel) model:

import tensorflow as tf
try:
    tf1 = tf.compat.v1
except (ImportError, AttributeError):
    tf1 = tf
from PIL import Image
import numpy as np
from nets import resnet_v2
import tf_slim as slim
import shutil
from tvm_book.data.classification import ImageFolderDataset

def remove_dir(path):
    """Delete a directory tree, ignoring the error when it does not exist."""
    try:
        shutil.rmtree(path)
    except FileNotFoundError:
        pass
tf.get_logger().setLevel('ERROR')

# Preprocessing
root = "/media/pc/data/lxw/home/data/datasets/ILSVRC/val"
valset = ImageFolderDataset(root)
image, label_id = valset[1001]
import set_env  # load TVM
import tvm.relay.testing.tf as tf_testing
model_dir = 'temp/resnet_v2_50'
remove_dir(model_dir)
checkpoints_path = tf_testing.get_workload_official(
    "http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz",
    "resnet_v2_50.ckpt"
)
with tf1.Graph().as_default() as graph:
    processed_image = preprocessing(
        image,
        use_grayscale=False,
        central_fraction=0.875,
        central_crop=True,
        height=299,
        width=299,
        mean=(0.485, 0.456, 0.406),
        std=(1, 1, 1)
    )
    processed_images = tf.expand_dims(processed_image, 0)
    # Build the model under the default arg scope, which configures the batch norm parameters.
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        logits, end_points = resnet_v2.resnet_v2_50(processed_images, num_classes=1001,
                                                    global_pool=True,
                                                    is_training=False)
    probabilities = tf.nn.softmax(logits)
    variables = slim.get_model_variables('resnet_v2_50')
    init_fn = slim.assign_from_checkpoint_fn(checkpoints_path, variables)
    with tf1.Session() as sess:
        init_fn(sess)
        # np_probabilities, np_processed_images = sess.run([probabilities, processed_images])
        np_probabilities = sess.run(probabilities)
        tf1.saved_model.simple_save(
            sess, model_dir,
            inputs={'inputs': processed_images},
            outputs={'output': probabilities}
        )
/media/pc/data/tmp/cache/conda/envs/tvmz/lib/python3.10/site-packages/tensorflow/python/keras/engine/base_layer_v1.py:1694: UserWarning: `layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.
  warnings.warn('`layer.apply` is deprecated and '
2024-01-06 18:33:42.431432: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2024-01-06 18:33:42.431543: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcublas.so.11'; dlerror: libcublas.so.11: cannot open shared object file: No such file or directory
2024-01-06 18:33:42.431629: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcublasLt.so.11'; dlerror: libcublasLt.so.11: cannot open shared object file: No such file or directory
2024-01-06 18:33:42.438100: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcusparse.so.11'; dlerror: libcusparse.so.11: cannot open shared object file: No such file or directory
2024-01-06 18:33:42.438164: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1934] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.
Skipping registering GPU devices...
2024-01-06 18:33:42.438711: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-01-06 18:33:42.474363: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:354] MLIR V1 optimization pass is not enabled
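
Besides the SavedModel directory, it is sometimes handy to have a single frozen GraphDef pb file. Below is a minimal sketch of freezing the exported model; the output file name frozen.pb is only an assumed example:

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

# Reload the SavedModel and fold its variables into constants
loaded = tf.saved_model.load(model_dir)
concrete_fn = loaded.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
frozen_fn = convert_variables_to_constants_v2(concrete_fn)
# Write the frozen GraphDef next to the SavedModel (file name is an assumption)
tf.io.write_graph(frozen_fn.graph.as_graph_def(), model_dir, "frozen.pb", as_text=False)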

Load the saved model:

import tensorflow as tf
try:
    tf1 = tf.compat.v1
except (ImportError, AttributeError):
    tf1 = tf
from PIL import Image
import numpy as np
from tvm_book.data.classification import ImageFolderDataset

tf.get_logger().setLevel('ERROR')

# Preprocessing
root = "/media/pc/data/lxw/home/data/datasets/ILSVRC/val"
valset = ImageFolderDataset(root)
image, label_id = valset[1001]
model_dir = 'temp/resnet_v2_50'
# remove_dir(model_dir)
processed_image = preprocessing(
    image,
    use_grayscale=False,
    central_fraction=0.875,
    central_crop=True,
    height=299,
    width=299,
    mean=(0.485, 0.456, 0.406),
    std=(1, 1, 1)
)
np_processed_images = np.expand_dims(processed_image.numpy(), axis=0)
# Load the model
loaded_model = tf.saved_model.load(model_dir)
loaded_model = loaded_model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
out = loaded_model(tf.constant(np_processed_images))['output'].numpy()
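
Assuming the conversion cell above was run in the same session (so np_probabilities is still defined), a quick consistency check can compare the loaded model's output against the Session result. This is a sketch, and the tolerances are only reasonable guesses:

import numpy as np

# The SavedModel path and the original Session should produce numerically close probabilities
np.testing.assert_allclose(out, np_probabilities, rtol=1e-5, atol=1e-5)
print("SavedModel output matches the Session output")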

Print the label information:

from tvm_book.data.imagenet.classification import ImageNet1kAttr

imagenet1k_attr = ImageNet1kAttr()
sorted_inds = np_probabilities[0].argsort()[::-1]
topk = 5
print(f"真实标签:{imagenet1k_attr.classes_long[label_id]}")
for sorted_ind in sorted_inds[:topk]:
    label = imagenet1k_attr.classes_long[sorted_ind-1]
    print(f"{sorted_ind-1}: {label.ljust(38)}\t{np_probabilities[0, sorted_ind]}")