Reposted from AI Studio. Original article:

WinForms-Based CycleGAN Deployment - 飞桨 AI Studio

Project Background

Many developers train an image-to-image translation model but then struggle to deploy it. Using a set of pretrained CycleGAN weights as an example, this project converts the weights to the ONNX format and deploys the model on Windows through a WinForms application, producing a visual image-to-image translation interface that makes a pretrained model easy to use.

Results Showcase

1 Training the Model

For background on the CycleGAN model itself, see the companion article: 一文搞懂GAN的风格迁移之CycleGAN - 飞桨 AI Studio

This article provides pretrained CycleGAN weights for translating between line drawings and blue-and-white porcelain images.

2 Exporting the Paddle Model to ONNX

ONNX (Open Neural Network Exchange) is an open file format designed for machine learning and used to store trained models. It lets different AI frameworks save models in a common format and exchange them. Once exported to ONNX, a Paddle model can be run with inference engines such as OpenVINO and ONNX Runtime.
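As a quick sanity check, the onnx package can load and validate an exported file. A minimal sketch (the file name matches the model exported later in this section):

import onnx

# Load the exported model and run ONNX's structural validation;
# check_model raises an exception if the graph is malformed.
model = onnx.load('Blue_and_white_porcelain.onnx')
onnx.checker.check_model(model)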

In [ ]

# Install the required Python packages
!pip install paddle2onnx
!pip install onnxruntime

In [9]

# Import the required Python packages
import random
import time
import warnings

import cv2
import matplotlib.pyplot as plt
import numpy as np
import onnx
import onnxruntime
import paddle
import paddle.nn as nn
from paddle.io import Dataset, DataLoader, IterableDataset
from paddle.static import InputSpec
from PIL import Image
%matplotlib inline

warnings.filterwarnings("ignore", category=Warning)  # suppress warning messages
BATCH_SIZE = 1
# PLACE = paddle.CPUPlace()    # train on CPU
# PLACE = paddle.CUDAPlace(0)  # train on GPU

In [2]

# CycleGAN model code

# Define the basic "convolution + instance norm" block
class ConvIN(nn.Layer):
    def __init__(self, num_channels, num_filters, filter_size, stride=1, padding=1, bias_attr=None, 
        weight_attr=None):
        super(ConvIN, self).__init__()
        model = [
            nn.Conv2D(num_channels, num_filters, filter_size, stride=stride, padding=padding, 
                bias_attr=bias_attr, weight_attr=weight_attr),
            nn.InstanceNorm2D(num_filters),
            nn.LeakyReLU(negative_slope=0.2)
        ]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        return self.model(x)

# Define the CycleGAN discriminator
class Disc(nn.Layer):
    def __init__(self, weight_attr=nn.initializer.Normal(0., 0.02)):
        super(Disc, self).__init__()
        model = [
            ConvIN(3, 64, 4, stride=2, padding=1, bias_attr=True, weight_attr=weight_attr),
            ConvIN(64, 128, 4, stride=2, padding=1, bias_attr=False, weight_attr=weight_attr),
            ConvIN(128, 256, 4, stride=2, padding=1, bias_attr=False, weight_attr=weight_attr),
            ConvIN(256, 512, 4, stride=1, padding=1, bias_attr=False, weight_attr=weight_attr),
            nn.Conv2D(512, 1, 4, stride=1, padding=1, bias_attr=True, weight_attr=weight_attr)
        ]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        return self.model(x)
# Define the basic "transposed convolution + instance norm" block
class ConvTransIN(nn.Layer):
    def __init__(self, num_channels, num_filters, filter_size, stride=1, padding='same', padding_mode='constant', 
        bias_attr=None, weight_attr=None):
        super(ConvTransIN, self).__init__()
        model = [
            nn.Conv2DTranspose(num_channels, num_filters, filter_size, stride=stride, padding=padding, 
                bias_attr=bias_attr, weight_attr=weight_attr),
            nn.InstanceNorm2D(num_filters),
            nn.LeakyReLU(negative_slope=0.2)
        ]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        return self.model(x)

# Define the residual block
class Residual(nn.Layer):
    def __init__(self, dim, bias_attr=None, weight_attr=None):
        super(Residual, self).__init__()
        model = [
            nn.Conv2D(dim, dim, 3, stride=1, padding=1, padding_mode='reflect', bias_attr=bias_attr, 
                weight_attr=weight_attr),
            nn.InstanceNorm2D(dim),
            nn.LeakyReLU(negative_slope=0.2),
        ]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        return x + self.model(x)

# Define the CycleGAN generator
class Gen(nn.Layer):
    def __init__(self, base_dim=64, residual_num=7, downup_layer=2, weight_attr=nn.initializer.Normal(0., 0.02)):
        super(Gen, self).__init__()
        model=[
            nn.Conv2D(3, base_dim, 7, stride=1, padding=3, padding_mode='reflect', bias_attr=False, 
                weight_attr=weight_attr),
            nn.InstanceNorm2D(base_dim),
            nn.LeakyReLU(negative_slope=0.2)
        ]
        # Down-sampling blocks
        for i in range(downup_layer):
            model += [
                ConvIN(base_dim * 2 ** i, base_dim * 2 ** (i + 1), 3, stride=2, padding=1, bias_attr=False, 
                    weight_attr=weight_attr),
            ]
        # Residual blocks
        for i in range(residual_num):
            model += [
                Residual(base_dim * 2 ** downup_layer, True, weight_attr=nn.initializer.Normal(0., 0.02))
            ]
        # Up-sampling blocks
        for i in range(downup_layer):
            model += [
                ConvTransIN(base_dim * 2 ** (downup_layer - i), base_dim * 2 ** (downup_layer - i - 1), 3, 
                    stride=2, padding='same', padding_mode='constant', bias_attr=False, weight_attr=weight_attr),
            ]
        model += [
            nn.Conv2D(base_dim, 3, 7, stride=1, padding=3, padding_mode='reflect', bias_attr=True, 
                weight_attr=nn.initializer.Normal(0., 0.02)),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        return self.model(x)
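
Before exporting, it is worth confirming the generator's wiring by pushing a dummy batch through it. A minimal sanity check (assuming the 224×224 input size used in the export cell below):

# Sanity check: the generator should map [N, 3, 224, 224] to [N, 3, 224, 224]
g = Gen()
dummy = paddle.randn([1, 3, 224, 224], dtype='float32')
out = g(dummy)
print(out.shape)  # expect [1, 3, 224, 224]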

In [5]

layer = Gen()  # instantiate the model
G_a_p = paddle.load("Blue_and_white_porcelain.pdparams")  # load the pretrained weights
layer.set_state_dict(G_a_p)
save_path = 'Blue_and_white_porcelain'
x_spec = InputSpec([None, 3, 224, 224], 'float32', 'x')  # the image shape used during training
paddle.onnx.export(layer, save_path, input_spec=[x_spec])  # export to the ONNX format
2022-05-13 22:49:57 [INFO]	ONNX model saved in Blue_and_white_porcelain.onnx
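
If a downstream runtime requires a particular ONNX opset, it can be pinned at export time via the opset_version argument of paddle.onnx.export. A minimal variant of the call above (assuming opset 11 suits your runtime):

# Pin the ONNX opset explicitly for compatibility with the target runtime.
paddle.onnx.export(layer, save_path, input_spec=[x_spec], opset_version=11)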

3 Testing the Exported Model

In [7]

def data_transform(img, output_size):
    h, w, _ = img.shape
    # assert h == w and h >= output_size  # optional size check
    # resize to the model's input size
    img = cv2.resize(img, (output_size, output_size), interpolation=cv2.INTER_AREA)
    # normalize from [0, 255] to [-1, 1]
    img = img / 255. * 2. - 1.
    # from [H, W, C] to [C, H, W]
    img = np.transpose(img, (2, 0, 1))
    # convert the data type
    img = img.astype('float32')
    return img

def save_pics(pics, file_name='tmp'):
    # drop the batch dimension of each picture and tile them side by side
    for i in range(len(pics)):
        pics[i] = pics[i][0]
    pic = np.concatenate(tuple(pics), axis=2)
    pic = pic.transpose((1, 2, 0))
    # map from [-1, 1] back to [0, 255]
    pic = (pic + 1) / 2
    pic = np.clip(pic * 256, 0, 255)
    img = Image.fromarray(pic.astype('uint8')).convert('RGB')
    img.save("blue_" + file_name)

In [10]

img_a = cv2.imread("011.jpg")
img_a = cv2.cvtColor(img_a, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the model expects RGB
img_a = data_transform(img_a, 224)
x = img_a.reshape(-1, 3, 224, 224)  # add the batch dimension
ort_sess = onnxruntime.InferenceSession('Blue_and_white_porcelain.onnx')
ort_inputs = {ort_sess.get_inputs()[0].name: x}
ort_outs = ort_sess.run(None, ort_inputs)
img = ort_outs[0]
save_pics([img], file_name="011.jpg")
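
To check that the export is numerically faithful, the Paddle model's output can be compared against the ONNX Runtime result. A minimal sketch reusing layer, x, and ort_outs from the cells above:

# Run the same input through the original Paddle model and compare.
layer.eval()
paddle_out = layer(paddle.to_tensor(x)).numpy()
print('max abs diff:', np.abs(paddle_out - ort_outs[0]).max())  # expect a small value, e.g. < 1e-4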

4 Model Deployment

The Python script invoked from the WinForms application:

import sys

import cv2
import numpy as np
import onnxruntime
from PIL import Image

def data_transform(img, output_size):
    h, w, _ = img.shape
    # assert h == w and h >= output_size  # optional size check
    # resize to the model's input size
    img = cv2.resize(img, (output_size, output_size), interpolation=cv2.INTER_AREA)
    # normalize from [0, 255] to [-1, 1]
    img = img / 255. * 2. - 1.
    # from [H, W, C] to [C, H, W]
    img = np.transpose(img, (2, 0, 1))
    # convert the data type
    img = img.astype('float32')
    return img

def save_pics(pics, file_name='tmp'):
    # drop the batch dimension of each picture and tile them side by side
    for i in range(len(pics)):
        pics[i] = pics[i][0]
    pic = np.concatenate(tuple(pics), axis=2)
    pic = pic.transpose((1, 2, 0))
    # map from [-1, 1] back to [0, 255]
    pic = (pic + 1) / 2
    pic = np.clip(pic * 256, 0, 255)
    img = Image.fromarray(pic.astype('uint8')).convert('RGB')
    img.save(file_name[:-4] + "_blue.jpg")

def blue(img_path):
    img_a = cv2.imread(img_path)
    img_a = cv2.cvtColor(img_a, cv2.COLOR_BGR2RGB)
    img_a = data_transform(img_a, 224)
    x = img_a.reshape(-1, 3, 224, 224)
    ort_sess = onnxruntime.InferenceSession('D:/Blue_and_white_porcelain.onnx')  # path to the ONNX model file
    ort_inputs = {ort_sess.get_inputs()[0].name: x}
    ort_outs = ort_sess.run(None, ort_inputs)
    img = ort_outs[0]
    save_pics([img], file_name=img_path)

blue(sys.argv[1])  # the WinForms side passes the image path as the first argument

Key parts of the C# code

The complete code has been uploaded to the folder on the left and can be downloaded and run locally.
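
In outline, the C# side launches the script above as a child process, passing the image path as the single command-line argument (received via sys.argv[1]). As a stand-in for the WinForms Process.Start call, the same invocation in Python (blue.py and the image path are assumed, illustrative names):

import subprocess

# Equivalent of the WinForms Process.Start call: run the script with
# the input image path as its only argument; the script then writes
# the stylized result next to the input as <name>_blue.jpg.
subprocess.run(['python', 'blue.py', 'D:/images/test.jpg'], check=True)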
