Companion license plate detection project: https://aistudio.baidu.com/aistudio/projectdetail/5557065

License Plate Recognition

End-to-end license plate recognition training with LPRNet

This project covers:

  1. The complete training and inference workflow
  2. Exporting the model to ONNX, plus ONNX checking and inference

Dataset construction

# Unzip the dataset
!unzip -o -q -d /home/aistudio/data /home/aistudio/data/data17968/CCPD2019.zip
import cv2
import os
import numpy as np
from tqdm.notebook import tqdm 

# Reference: https://blog.csdn.net/qq_36516958/article/details/114274778
from PIL import Image
# CCPD contains duplicate plates, presumably captured at different angles or blur levels
path = r'data/ccpd_base'  # change this to your own plate image directory


provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"]
alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
             'X', 'Y', 'Z', 'O']
ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
       'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']

save_path = 'rec_images/data/'
if not os.path.exists(save_path):
    os.makedirs(save_path)

num = 0
for filename in tqdm(os.listdir(path)):
    num += 1
    result = ""
    _, _, box, points, plate, brightness, blurriness = filename.split('-')
    list_plate = plate.split('_')  # decode the plate label
    result += provinces[int(list_plate[0])]
    result += alphabets[int(list_plate[1])]
    result += ads[int(list_plate[2])] + ads[int(list_plate[3])] + ads[int(list_plate[4])] + ads[int(list_plate[5])] + ads[int(list_plate[6])]
    # Check required for new-energy plates; delete this if-block if you are not handling new-energy plates
    # if result[2] != 'D' and result[2] != 'F' \
    #         and result[-1] != 'D' and result[-1] != 'F':
    #     print(filename)
    #     print("Error label, Please check!")
    #     assert 0, "Error label ^~^!!!"
    # print(result)
    
    img_path = os.path.join(path, filename)
    assert os.path.exists(img_path), "image file {} does not exist.".format(img_path)
    img = cv2.imread(img_path)
    box = box.split('_')  # plate bounding box corners
    box = [list(map(int, i.split('&'))) for i in box]

    xmin = box[0][0]
    xmax = box[1][0]
    ymin = box[0][1]
    ymax = box[1][1]

    img = Image.fromarray(img)
    img = img.crop((xmin, ymin, xmax, ymax))  # crop the plate region
    img = img.resize((94, 24), Image.LANCZOS)
    img = np.asarray(img)  # back to an array of shape 24*94*3

    cv2.imencode('.jpg', img)[1].tofile(os.path.join(save_path, r"{}.jpg".format(result)))
    # cv2.imwrite fails on non-ASCII (Chinese) filenames, so imencode + tofile is used instead
    # cv2.imwrite(r"K:\MyProject\datasets\ccpd\new\ccpd_2020\rec_images\train\{}.jpg".format(result), img)  # change to your own save path

Dataset split

import os
import random

image_dir = "rec_images/data"
train_file = 'rec_images/train.txt'
eval_file = 'rec_images/valid.txt'

dataset_list = os.listdir(image_dir)

train_num = 0
valid_num = 0
with open(train_file, 'a') as f_train, open(eval_file, 'a') as f_eval:
    for img_name in dataset_list:
        if '.jpg' not in img_name:
            print(img_name)
            continue
        probo = random.randint(1, 100)
        if probo <= 80:  # train
            f_train.write(img_name + '\n')
            train_num += 1
        else:  # valid
            f_eval.write(img_name + '\n')
            valid_num += 1
print(f'train: {train_num}, val:{valid_num}')


.ipynb_checkpoints
train: 63068, val:15828

Dataloader

Data loading
import os
from paddle.io import Dataset
from PIL import Image
import numpy as np

CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]

CHARS_DICT = {char:i for i, char in enumerate(CHARS)}

class LprnetDataloader(Dataset):
    def __init__(self, target_path, label_text, transforms=None):
        super().__init__()
        self.transforms = transforms
        self.target_path = target_path
        with open(label_text) as f:
            self.data = f.readlines()

    def __getitem__(self, index):
        img_name = self.data[index].strip()
        img_path = os.path.join(self.target_path, img_name)
        data = Image.open(img_path)
        
        label = []
        img_label = img_name.split('.', 1)[0]
        for c in img_label:
            label.append(CHARS_DICT[c])
        if len(label) == 8:
            if not self.check(label):
                print(img_name)
                assert 0, "Error label ^~^!!!"

        if self.transforms is not None:
            data = self.transforms(data)

        data = np.array(data, dtype=np.float32)
        np_label = np.array(label, dtype=np.int64)
        return data, np_label, len(np_label)

    def __len__(self):
        return len(self.data)
    
    def check(self, label):
        if label[2] != CHARS_DICT['D'] and label[2] != CHARS_DICT['F'] \
                and label[-1] != CHARS_DICT['D'] and label[-1] != CHARS_DICT['F']:
            print("Error label, Please check!")
            return False
        else:
            return True

Batching

Labels of different lengths are padded to the longest label in the batch, giving a tensor of shape (batch_size, max_label_length).

import numpy as np
import paddle


def collate_fn(batch):
    # Images are already resized to the same shape, so only the labels need padding
    batch_size = len(batch)
    # Find the longest label in the batch
    batch_temp = sorted(batch, key=lambda sample: len(sample[1]), reverse=True)
    max_label_length = len(batch_temp[0][1])
    # Create a zero tensor sized to the max label length
    labels = np.zeros((batch_size, max_label_length), dtype='int64')
    label_lens = []
    img_list = []
    for x in range(batch_size):
        sample = batch[x]
        tensor = sample[0]
        target = sample[1]
        label_length = sample[2]
        img_list.append(tensor)
        # Copy each label into the zero tensor, which pads it to max_label_length
        labels[x, :label_length] = target[:]
        label_lens.append(len(target))
    label_lens = paddle.to_tensor(label_lens, dtype='int64')  # needed by CTCLoss
    imgs = paddle.to_tensor(img_list, dtype='float32')
    labels = paddle.to_tensor(labels, dtype="int32")  # CTCLoss only supports int32 labels
    return imgs, labels, label_lens

Data preprocessing

The dataset is large and already covers many conditions (weather, angle, blur), so no extra data augmentation is applied here.

A simple normalization is enough: during training the images only go through ToTensor + Normalize.

import paddle.vision.transforms as T

train_transforms = T.Compose([     
            T.ToTensor(data_format='CHW'),  # CHW is the output data format
            T.Normalize(
                [0.5, 0.5, 0.5],  # ToTensor has already scaled pixel values to 0-1
                [0.5, 0.5, 0.5],
                data_format='CHW'  # input data format
            ), 
        ])

LPRNet network

Network structure
import paddle.nn as nn
import paddle


CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]


class small_basic_block(nn.Layer):
    def __init__(self, ch_in, ch_out):
        super(small_basic_block, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2D(ch_in, ch_out // 4, kernel_size=1),
            nn.ReLU(),
            nn.Conv2D(ch_out // 4, ch_out // 4, kernel_size=(3, 1), padding=(1, 0)),
            nn.ReLU(),
            nn.Conv2D(ch_out // 4, ch_out // 4, kernel_size=(1, 3), padding=(0, 1)),
            nn.ReLU(),
            nn.Conv2D(ch_out // 4, ch_out, kernel_size=1),
        )
    def forward(self, x):
        return self.block(x)


class maxpool_3d(nn.Layer):
    def __init__(self, kernel_size, stride):
        super(maxpool_3d, self).__init__()
        assert(len(kernel_size)==3 and len(stride)==3)
        kernel_size2d1 = kernel_size[-2:]
        stride2d1 = stride[-2:]
        kernel_size2d2 = (1, kernel_size[0])
        stride2d2 = (1, stride[0])
        self.maxpool1 = nn.MaxPool2D(kernel_size=kernel_size2d1, stride=stride2d1)
        self.maxpool2 = nn.MaxPool2D(kernel_size=kernel_size2d2, stride=stride2d2)

    def forward(self,x):
        x = self.maxpool1(x)
        x = x.transpose((0, 3, 2, 1))
        x = self.maxpool2(x)
        x = x.transpose((0, 3, 2, 1))
        return x


class LPRNet(nn.Layer):
    def __init__(self, lpr_max_len, class_num, dropout_rate):
        super(LPRNet, self).__init__()
        self.lpr_max_len = lpr_max_len
        self.class_num = class_num
        self.backbone = nn.Sequential(
            nn.Conv2D(in_channels=3, out_channels=64, kernel_size=3, stride=1),    # 0  [bs,3,24,94] -> [bs,64,22,92]
            nn.BatchNorm2D(num_features=64),                                       # 1  -> [bs,64,22,92]
            nn.ReLU(),                                                             # 2  -> [bs,64,22,92]
            maxpool_3d(kernel_size=(1, 3, 3), stride=(1, 1, 1)),                 # 3  -> [bs,64,20,90]
            small_basic_block(ch_in=64, ch_out=128),                               # 4  -> [bs,128,20,90]
            nn.BatchNorm2D(num_features=128),                                      # 5  -> [bs,128,20,90]
            nn.ReLU(),                                                             # 6  -> [bs,128,20,90]
            maxpool_3d(kernel_size=(1, 3, 3), stride=(2, 1, 2)),                 # 7  -> [bs,64,18,44]
            small_basic_block(ch_in=64, ch_out=256),                               # 8  -> [bs,256,18,44]
            nn.BatchNorm2D(num_features=256),                                      # 9  -> [bs,256,18,44]
            nn.ReLU(),                                                             # 10 -> [bs,256,18,44]
            small_basic_block(ch_in=256, ch_out=256),                              # 11 -> [bs,256,18,44]
            nn.BatchNorm2D(num_features=256),                                      # 12 -> [bs,256,18,44]
            nn.ReLU(),                                                             # 13 -> [bs,256,18,44]
            maxpool_3d(kernel_size=(1, 3, 3), stride=(4, 1, 2)),                 # 14 -> [bs,64,16,21]
            nn.Dropout(dropout_rate),                                              # 15 -> [bs,64,16,21]
            nn.Conv2D(in_channels=64, out_channels=256, kernel_size=(1, 4), stride=1),   # 16 -> [bs,256,16,18]
            nn.BatchNorm2D(num_features=256),                                            # 17 -> [bs,256,16,18]
            nn.ReLU(),                                                                   # 18 -> [bs,256,16,18]
            nn.Dropout(dropout_rate),                                                    # 19 -> [bs,256,16,18]
            nn.Conv2D(in_channels=256, out_channels=class_num, kernel_size=(13, 1), stride=1),  # class_num=68  20  -> [bs,68,4,18]
            nn.BatchNorm2D(num_features=class_num),                                             # 21 -> [bs,68,4,18]
            nn.ReLU(),                                                                          # 22 -> [bs,68,4,18]
        )
        self.container = nn.Sequential(
            nn.Conv2D(in_channels=448+self.class_num, out_channels=self.class_num, kernel_size=(1, 1), stride=(1, 1)),
        )

    def forward(self, x):
        keep_features = list()
        for i, layer in enumerate(self.backbone.children()):
            x = layer(x)
            if i in [2, 6, 13, 22]:
                keep_features.append(x)

        global_context = list()
        # keep_features: [bs,64,22,92]  [bs,128,20,90] [bs,256,18,44] [bs,68,4,18]
        for i, f in enumerate(keep_features):
            if i in [0, 1]:
                # [bs,64,22,92] -> [bs,64,4,18]
                # [bs,128,20,90] -> [bs,128,4,18]
                f = nn.AvgPool2D(kernel_size=5, stride=5)(f)
            if i in [2]:
                # [bs,256,18,44] -> [bs,256,4,18]
                f = nn.AvgPool2D(kernel_size=(4, 10), stride=(4, 2))(f)

            f_pow = paddle.pow(f, 2)     # [bs,64,4,18]  square every element
            f_mean = paddle.mean(f_pow)  # scalar: mean over all elements (the whole batch)
            f = paddle.divide(f, f_mean)    # [bs,64,4,18]  divide every element by that mean
            global_context.append(f)

        x = paddle.concat(global_context, 1)  # [bs,516,4,18]
        x = self.container(x)  # -> [bs, 68, 4, 18]   classification head
        logits = paddle.mean(x, axis=2)  # -> [bs, 68, 18]  68 = number of character classes, 18 = sequence length

        return logits
Weight initialization function
# can be applied to every sublayer via model.apply
def init_weight(model):
    for name, layer in model.named_sublayers():
        if isinstance(layer, nn.Conv2D):
            weight_attr = nn.initializer.KaimingNormal()
            bias_attr = nn.initializer.Constant(0.)
            init_bias = paddle.create_parameter(layer.bias.shape, attr=bias_attr, dtype='float32')
            init_weight = paddle.create_parameter(layer.weight.shape, attr=weight_attr, dtype='float32')
            layer.weight = init_weight
            layer.bias = init_bias
        elif isinstance(layer, nn.BatchNorm2D):
            weight_attr = nn.initializer.XavierUniform()
            bias_attr = nn.initializer.Constant(0.)
            init_bias = paddle.create_parameter(layer.bias.shape, attr=bias_attr, dtype='float32')
            init_weight = paddle.create_parameter(layer.weight.shape, attr=weight_attr, dtype='float32')
            layer.weight = init_weight
            layer.bias = init_bias

Loss function

The loss is CTCLoss, which takes the following arguments:

  1. logits: probability sequence, shape=[max_logit_length, batch_size, num_classes+1]

    only float32 is supported

  2. labels: padded label sequence, shape=[batch_size, max_label_length]

    only int32 is supported

  3. input_lengths: length of each sequence in logits, shape=[batch_size]

    only int64 is supported

  4. label_lengths: length of each sequence in labels, shape=[batch_size]

    only int64 is supported

CTCLoss documentation: https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/CTCLoss_cn.html
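
The sketch below (not part of the original notebook; dummy tensors with shapes chosen to match the description above) shows how the four arguments fit together, with the blank index set to the last entry of CHARS ('-'):

import numpy as np
import paddle
import paddle.nn as nn

T, N, C = 18, 4, 68                      # logit sequence length, batch size, class count (incl. blank)
loss_func = nn.CTCLoss(blank=C - 1)      # blank = last index, matching the CHARS table

logits = paddle.randn([T, N, C], dtype='float32')           # [max_logit_length, batch_size, num_classes+1]
labels = paddle.to_tensor(np.zeros((N, 7)), dtype='int32')  # padded labels, [batch_size, max_label_length]
input_lengths = paddle.full([N], T, dtype='int64')          # each logit sequence has length T
label_lengths = paddle.full([N], 7, dtype='int64')          # each dummy plate has 7 characters

loss = loss_func(logits, labels, input_lengths, label_lengths)
print(float(loss))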

Accuracy calculation function

import numpy as np

CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]

class ACC:
    def __init__(self):
        self.Tp = 0
        self.Tn_1 = 0
        self.Tn_2 = 0
        self.acc = 0

    def batch_update(self, batch_label, label_lengths, pred):
        for i, label in enumerate(batch_label):
            length = label_lengths[i]
            label = label[:length]
            pred_i = pred[i, :, :]
            preb_label = []
            for j in range(pred_i.shape[1]):  # T
                preb_label.append(np.argmax(pred_i[:, j], axis=0))
            no_repeat_blank_label = []
            pre_c = preb_label[0]
            if pre_c != len(CHARS) - 1:  # not blank
                no_repeat_blank_label.append(pre_c)
            for c in preb_label:  # drop repeated labels and blank labels
                if (pre_c == c) or (c == len(CHARS) - 1):
                    if c == len(CHARS) - 1:
                        pre_c = c
                    continue
                no_repeat_blank_label.append(c)
                pre_c = c
            # print('no_repeat_blank_label:', no_repeat_blank_label)
            # print('gt_label:', label)
            if len(label) != len(no_repeat_blank_label):
                self.Tn_1 += 1
            elif (np.asarray(label) == np.asarray(no_repeat_blank_label)).all():
                self.Tp += 1
            else:
                self.Tn_2 += 1
        self.acc = self.Tp * 1.0 / (self.Tp + self.Tn_1 + self.Tn_2)

    def clear(self):
        self.Tp = 0
        self.Tn_1 = 0
        self.Tn_2 = 0
        self.acc = 0

print(len(CHARS))
68

Loading pretrained weights

The first run did not fully converge, so training continues from the previous weights, which need to be loaded first.

# The first run trained for 100 epochs with a best acc of 0.86
# Saved weight path: runs/lprnet_best.pdparams
import os

def load_pretrained(model, path=None):
    print('params loading...')
    if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
        raise ValueError("Model pretrain path {} does not "
                         "exist.".format(path))
    param_state_dict = paddle.load(path + ".pdparams")
    model.set_dict(param_state_dict)
    print(f'load {path + ".pdparams"} success...')
    return

Training

The first run produces: runs/lprnet_best.pdparams

The second run produces: runs/lprnet_best_2.pdparams

First training run

No data augmentation was used.

EPOCH = 100

DROPOUT = 0.

LEARNINGRATE = 0.00005

TRAINBATCHSIZE = 128

WEIGHTDECAY = 0

At epoch 97:

  1. training acc 0.8748, training loss 0.0775

  2. best validation acc 0.8636, validation loss 0.0731

The accuracy clearly still has room to improve, and the validation loss stays close to the training loss, so the model is still underfitting.

Second training run

The second run resumes from the best-acc weights of the first run. Since the weights are already fairly stable, the warmup stage is dropped from the learning-rate schedule.

Besides a larger batch size, the base learning rate is raised to 0.001, much higher than in the first run, mainly to reach a reasonable model quickly.

ColorJitter augmentation and L2Decay regularization are also added; the remaining parameters are:

EPOCH = 40

DROPOUT = 0.

LEARNINGRATE = 0.001

TRAINBATCHSIZE = 256

WEIGHTDECAY = 0.001

import paddle.vision.transforms as T
from paddle.io import DataLoader
import time
from statistics import mean

# Parameter definitions
EPOCH = 40
IMGSIZE = (94, 24)
IMGDIR = 'rec_images/data'
TRAINFILE = 'rec_images/train.txt'
VALIDFILE = 'rec_images/valid.txt'
SAVEFOLDER = './runs'
DROPOUT = 0.
LEARNINGRATE = 0.001
LPRMAXLEN = 18
TRAINBATCHSIZE = 256
EVALBATCHSIZE = 256
NUMWORKERS = 2  # if the dataloader errors out, reduce this value or set it to 0
WEIGHTDECAY = 0.001

# Image preprocessing
train_transforms = T.Compose([  
            T.ColorJitter(0.2,0.2,0.2),   # this augmentation was not used in the first run
            T.ToTensor(data_format='CHW'), 
            T.Normalize(
                [0.5, 0.5, 0.5],  # ToTensor has already scaled pixel values to 0-1
                [0.5, 0.5, 0.5],
                data_format='CHW' 
            ), 
        ])
eval_transforms = T.Compose([    
            T.ToTensor(data_format='CHW'), 
            T.Normalize(
                [0.5, 0.5, 0.5], 
                [0.5, 0.5, 0.5],
                data_format='CHW' 
            ), 
        ])

# Data loading
train_data_set = LprnetDataloader(IMGDIR, TRAINFILE, train_transforms)
eval_data_set = LprnetDataloader(IMGDIR, VALIDFILE, eval_transforms)
train_loader = DataLoader(
    train_data_set, 
    batch_size=TRAINBATCHSIZE, 
    shuffle=True, 
    num_workers=NUMWORKERS, 
    drop_last=True, 
    collate_fn=collate_fn
)
eval_loader = DataLoader(
    eval_data_set, 
    batch_size=EVALBATCHSIZE, 
    shuffle=False, 
    num_workers=NUMWORKERS, 
    drop_last=False, 
    collate_fn=collate_fn
)

# Define the loss
loss_func = nn.CTCLoss(len(CHARS)-1)

# input_length, required by the CTC loss
input_length = np.ones(shape=TRAINBATCHSIZE) * LPRMAXLEN
input_length = paddle.to_tensor(input_length, dtype='int64')

# LPRNet network: initialize or load pretrained weights
model = LPRNet(LPRMAXLEN, len(CHARS), DROPOUT)
# model.apply(init_weight)  # initialize on the first training run
load_pretrained(model, 'runs/lprnet_best')  # second run resumes from the unfinished first-run weights

# Define the optimizer
def make_optimizer(base_lr, parameters=None):
    momentum = 0.9
    weight_decay = WEIGHTDECAY
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
        learning_rate=base_lr, eta_min=0, T_max=EPOCH, verbose=1)

    # scheduler = paddle.optimizer.lr.LinearWarmup(  # warmup was used in the first run while the weights were still unstable
    #     learning_rate=scheduler,
    #     warmup_steps=5,
    #     start_lr=base_lr/5,
    #     end_lr=base_lr,
    #     verbose=True)

    optimizer = paddle.optimizer.Momentum(
        learning_rate=scheduler,
        weight_decay=paddle.regularizer.L2Decay(weight_decay),
        momentum=momentum,
        parameters=parameters)
    return optimizer, scheduler
optim, scheduler = make_optimizer(LEARNINGRATE, parameters=model.parameters())

# acc
acc_train = ACC()
acc_eval = ACC()
BESTACC = 0.5

# Training loop
for epoch in range(EPOCH):

    start_time = time.localtime(time.time())
    str_time = time.strftime("%Y-%m-%d %H:%M:%S", start_time)
    print(f'{str_time} || Epoch {epoch} start:')

    model.train()
    for batch_id, bath_data in enumerate(train_loader):
        img_data, label_data, label_lens = bath_data
        
        predict = model(img_data)
        logits  = paddle.transpose(predict, (2,0,1))  # for ctc loss: T x N x C

        loss = loss_func(logits , label_data, input_length, label_lens)
        acc_train.batch_update(label_data, label_lens, predict)
        if batch_id % 50 == 0:
            print(f'epoch:{epoch}, batch_id:{batch_id}, loss:{loss.item():.4f}, \
            acc:{acc_train.acc:.4f} Tp/Tn_1/Tn_2: {acc_train.Tp}/{acc_train.Tn_1}/{acc_train.Tn_2}')
        
        loss.backward()
        optim.step()
        optim.clear_grad()
    acc_train.clear()
    
    # save
    if epoch and epoch % 20 == 0:
        paddle.save(model.state_dict(), os.path.join(SAVEFOLDER,f'lprnet_{epoch}_2.pdparams'))
        paddle.save(optim.state_dict(), os.path.join(SAVEFOLDER,f'lprnet_{epoch}_2.pdopt'))
        print(f'Saved log epoch-{epoch}')

    # eval
    with paddle.no_grad():
        model.eval()
        loss_list = []
        for batch_id, bath_data in enumerate(eval_loader):
            img_data, label_data, label_lens = bath_data
            predict = model(img_data)
            logits = paddle.transpose(predict, (2,0,1))
            loss = loss_func(logits, label_data, input_length, label_lens)
            acc_eval.batch_update(label_data, label_lens, predict)
            loss_list.append(loss.item())
        print(f'Eval of epoch {epoch} => acc:{acc_eval.acc:.4f}, loss:{mean(loss_list):.4f}')

        # save best model
        if acc_eval.acc > BESTACC:
            paddle.save(model.state_dict(), os.path.join(SAVEFOLDER,f'lprnet_best_2.pdparams'))
            paddle.save(optim.state_dict(), os.path.join(SAVEFOLDER,f'lprnet_best_2.pdopt'))
            BESTACC = acc_eval.acc
            print(f'Saved best model of epoch{epoch}, acc {acc_eval.acc:.4f}, save path "{SAVEFOLDER}"')
        acc_eval.clear()
    
    # learning-rate schedule step
    scheduler.step()





params loading...
load runs/lprnet_best.pdparams success...
Epoch 0: CosineAnnealingDecay set learning rate to 0.001.
2023-03-14 07:56:07 || Epoch 0 start:
epoch:0, batch_id:0, loss:0.1304,             acc:0.8633 Tp/Tn_1/Tn_2: 221/13/22
epoch:0, batch_id:50, loss:0.1420,             acc:0.8477 Tp/Tn_1/Tn_2: 11068/703/1285
epoch:0, batch_id:100, loss:0.1476,             acc:0.8473 Tp/Tn_1/Tn_2: 21909/1402/2545
epoch:0, batch_id:150, loss:0.0885,             acc:0.8493 Tp/Tn_1/Tn_2: 32832/2027/3797
epoch:0, batch_id:200, loss:0.1362,             acc:0.8506 Tp/Tn_1/Tn_2: 43768/2650/5038
Eval of epoch 0 => acc:0.8604, loss:0.0755
Saved best model of epoch0, acc 0.8604, save path "./runs"
Epoch 1: CosineAnnealingDecay set learning rate to 0.000998458666866564.
2023-03-14 07:58:26 || Epoch 1 start:
epoch:1, batch_id:0, loss:0.0813,             acc:0.8594 Tp/Tn_1/Tn_2: 220/10/26
epoch:1, batch_id:50, loss:0.0960,             acc:0.8732 Tp/Tn_1/Tn_2: 11401/560/1095
epoch:1, batch_id:100, loss:0.0869,             acc:0.8710 Tp/Tn_1/Tn_2: 22520/1121/2215
epoch:1, batch_id:150, loss:0.0712,             acc:0.8729 Tp/Tn_1/Tn_2: 33743/1658/3255
epoch:1, batch_id:200, loss:0.0911,             acc:0.8742 Tp/Tn_1/Tn_2: 44982/2181/4293
Eval of epoch 1 => acc:0.8774, loss:0.0602
Saved best model of epoch1, acc 0.8774, save path "./runs"
Epoch 2: CosineAnnealingDecay set learning rate to 0.0009938441702975688.
2023-03-14 08:00:41 || Epoch 2 start:
epoch:2, batch_id:0, loss:0.0553,             acc:0.9102 Tp/Tn_1/Tn_2: 233/4/19
epoch:2, batch_id:50, loss:0.1110,             acc:0.8826 Tp/Tn_1/Tn_2: 11523/508/1025
epoch:2, batch_id:100, loss:0.1028,             acc:0.8837 Tp/Tn_1/Tn_2: 22850/995/2011
epoch:2, batch_id:150, loss:0.0499,             acc:0.8867 Tp/Tn_1/Tn_2: 34278/1445/2933
epoch:2, batch_id:200, loss:0.0713,             acc:0.8875 Tp/Tn_1/Tn_2: 45666/1894/3896
Eval of epoch 2 => acc:0.8726, loss:0.0792
Epoch 3: CosineAnnealingDecay set learning rate to 0.0009861849601988382.
2023-03-14 08:02:57 || Epoch 3 start:
epoch:3, batch_id:0, loss:0.1095,             acc:0.8906 Tp/Tn_1/Tn_2: 228/8/20
epoch:3, batch_id:50, loss:0.0588,             acc:0.9008 Tp/Tn_1/Tn_2: 11761/443/852
epoch:3, batch_id:100, loss:0.0740,             acc:0.8991 Tp/Tn_1/Tn_2: 23246/862/1748
epoch:3, batch_id:150, loss:0.0416,             acc:0.9011 Tp/Tn_1/Tn_2: 34834/1272/2550
epoch:3, batch_id:200, loss:0.0590,             acc:0.9006 Tp/Tn_1/Tn_2: 46342/1704/3410
Eval of epoch 3 => acc:0.8946, loss:0.0502
Saved best model of epoch3, acc 0.8946, save path "./runs"
Epoch 4: CosineAnnealingDecay set learning rate to 0.0009755282581475767.
2023-03-14 08:05:13 || Epoch 4 start:
epoch:4, batch_id:0, loss:0.0645,             acc:0.9180 Tp/Tn_1/Tn_2: 235/8/13
epoch:4, batch_id:50, loss:0.0699,             acc:0.9111 Tp/Tn_1/Tn_2: 11895/363/798
epoch:4, batch_id:100, loss:0.0824,             acc:0.9092 Tp/Tn_1/Tn_2: 23509/714/1633
epoch:4, batch_id:150, loss:0.0504,             acc:0.9092 Tp/Tn_1/Tn_2: 35146/1097/2413
epoch:4, batch_id:200, loss:0.0514,             acc:0.9106 Tp/Tn_1/Tn_2: 46856/1448/3152
Eval of epoch 4 => acc:0.8983, loss:0.0448
Saved best model of epoch4, acc 0.8983, save path "./runs"
Epoch 5: CosineAnnealingDecay set learning rate to 0.0009619397662556432.
2023-03-14 08:07:30 || Epoch 5 start:
epoch:5, batch_id:0, loss:0.0853,             acc:0.8984 Tp/Tn_1/Tn_2: 230/7/19
epoch:5, batch_id:50, loss:0.0550,             acc:0.9197 Tp/Tn_1/Tn_2: 12008/326/722
epoch:5, batch_id:100, loss:0.0544,             acc:0.9192 Tp/Tn_1/Tn_2: 23767/678/1411
epoch:5, batch_id:150, loss:0.0607,             acc:0.9186 Tp/Tn_1/Tn_2: 35509/1031/2116
epoch:5, batch_id:200, loss:0.0527,             acc:0.9191 Tp/Tn_1/Tn_2: 47292/1350/2814
Eval of epoch 5 => acc:0.9010, loss:0.0446
Saved best model of epoch5, acc 0.9010, save path "./runs"
Epoch 6: CosineAnnealingDecay set learning rate to 0.0009455032620941838.
2023-03-14 08:09:46 || Epoch 6 start:
epoch:6, batch_id:0, loss:0.0404,             acc:0.9492 Tp/Tn_1/Tn_2: 243/3/10
epoch:6, batch_id:50, loss:0.0688,             acc:0.9262 Tp/Tn_1/Tn_2: 12093/302/661
epoch:6, batch_id:100, loss:0.0548,             acc:0.9257 Tp/Tn_1/Tn_2: 23936/588/1332
epoch:6, batch_id:150, loss:0.0435,             acc:0.9236 Tp/Tn_1/Tn_2: 35701/911/2044
epoch:6, batch_id:200, loss:0.0363,             acc:0.9247 Tp/Tn_1/Tn_2: 47582/1194/2680
Eval of epoch 6 => acc:0.8995, loss:0.0507
Epoch 7: CosineAnnealingDecay set learning rate to 0.000926320082177046.
2023-03-14 08:12:01 || Epoch 7 start:
epoch:7, batch_id:0, loss:0.0579,             acc:0.9297 Tp/Tn_1/Tn_2: 238/5/13
epoch:7, batch_id:50, loss:0.0301,             acc:0.9305 Tp/Tn_1/Tn_2: 12149/282/625
epoch:7, batch_id:100, loss:0.0525,             acc:0.9305 Tp/Tn_1/Tn_2: 24058/555/1243
epoch:7, batch_id:150, loss:0.0808,             acc:0.9292 Tp/Tn_1/Tn_2: 35919/844/1893
epoch:7, batch_id:200, loss:0.0519,             acc:0.9294 Tp/Tn_1/Tn_2: 47824/1113/2519
Eval of epoch 7 => acc:0.9201, loss:0.0333
Saved best model of epoch7, acc 0.9201, save path "./runs"
Epoch 8: CosineAnnealingDecay set learning rate to 0.0009045084971874736.
2023-03-14 08:14:16 || Epoch 8 start:
epoch:8, batch_id:0, loss:0.0313,             acc:0.9375 Tp/Tn_1/Tn_2: 240/2/14
epoch:8, batch_id:50, loss:0.0373,             acc:0.9298 Tp/Tn_1/Tn_2: 12140/281/635
epoch:8, batch_id:100, loss:0.0523,             acc:0.9320 Tp/Tn_1/Tn_2: 24097/532/1227
epoch:8, batch_id:150, loss:0.0531,             acc:0.9336 Tp/Tn_1/Tn_2: 36088/782/1786
epoch:8, batch_id:200, loss:0.0407,             acc:0.9331 Tp/Tn_1/Tn_2: 48016/1043/2397
Eval of epoch 8 => acc:0.9303, loss:0.0284
Saved best model of epoch8, acc 0.9303, save path "./runs"
Epoch 9: CosineAnnealingDecay set learning rate to 0.0008802029828000154.
2023-03-14 08:16:31 || Epoch 9 start:
epoch:9, batch_id:0, loss:0.0309,             acc:0.9453 Tp/Tn_1/Tn_2: 242/7/7
epoch:9, batch_id:50, loss:0.0366,             acc:0.9413 Tp/Tn_1/Tn_2: 12290/238/528
epoch:9, batch_id:100, loss:0.0388,             acc:0.9397 Tp/Tn_1/Tn_2: 24297/485/1074
epoch:9, batch_id:150, loss:0.0396,             acc:0.9391 Tp/Tn_1/Tn_2: 36303/741/1612
epoch:9, batch_id:200, loss:0.0400,             acc:0.9383 Tp/Tn_1/Tn_2: 48282/995/2179
Eval of epoch 9 => acc:0.9287, loss:0.0334
Epoch 10: CosineAnnealingDecay set learning rate to 0.0008535533905932736.
2023-03-14 08:18:46 || Epoch 10 start:
epoch:10, batch_id:0, loss:0.0280,             acc:0.9453 Tp/Tn_1/Tn_2: 242/4/10
epoch:10, batch_id:50, loss:0.0624,             acc:0.9411 Tp/Tn_1/Tn_2: 12287/226/543
epoch:10, batch_id:100, loss:0.0498,             acc:0.9414 Tp/Tn_1/Tn_2: 24340/452/1064
epoch:10, batch_id:150, loss:0.0298,             acc:0.9404 Tp/Tn_1/Tn_2: 36354/682/1620
epoch:10, batch_id:200, loss:0.0436,             acc:0.9403 Tp/Tn_1/Tn_2: 48384/910/2162
Eval of epoch 10 => acc:0.9321, loss:0.0315
Saved best model of epoch10, acc 0.9321, save path "./runs"
Epoch 11: CosineAnnealingDecay set learning rate to 0.0008247240241650918.
2023-03-14 08:21:19 || Epoch 11 start:
epoch:11, batch_id:0, loss:0.0349,             acc:0.9570 Tp/Tn_1/Tn_2: 245/3/8
epoch:11, batch_id:50, loss:0.0549,             acc:0.9462 Tp/Tn_1/Tn_2: 12353/223/480
epoch:11, batch_id:100, loss:0.0417,             acc:0.9464 Tp/Tn_1/Tn_2: 24470/437/949
epoch:11, batch_id:150, loss:0.0620,             acc:0.9458 Tp/Tn_1/Tn_2: 36559/651/1446
epoch:11, batch_id:200, loss:0.0478,             acc:0.9450 Tp/Tn_1/Tn_2: 48628/875/1953
Eval of epoch 11 => acc:0.9306, loss:0.0375
Epoch 12: CosineAnnealingDecay set learning rate to 0.0007938926261462365.
2023-03-14 08:23:35 || Epoch 12 start:
epoch:12, batch_id:0, loss:0.0348,             acc:0.9453 Tp/Tn_1/Tn_2: 242/2/12
epoch:12, batch_id:50, loss:0.0258,             acc:0.9488 Tp/Tn_1/Tn_2: 12388/185/483
epoch:12, batch_id:100, loss:0.0482,             acc:0.9481 Tp/Tn_1/Tn_2: 24515/409/932
epoch:12, batch_id:150, loss:0.0622,             acc:0.9476 Tp/Tn_1/Tn_2: 36631/609/1416
epoch:12, batch_id:200, loss:0.0454,             acc:0.9473 Tp/Tn_1/Tn_2: 48744/810/1902
Eval of epoch 12 => acc:0.9350, loss:0.0259
Saved best model of epoch12, acc 0.9350, save path "./runs"
Epoch 13: CosineAnnealingDecay set learning rate to 0.0007612492823579742.
2023-03-14 08:25:51 || Epoch 13 start:
epoch:13, batch_id:0, loss:0.0191,             acc:0.9531 Tp/Tn_1/Tn_2: 244/4/8
epoch:13, batch_id:50, loss:0.0479,             acc:0.9513 Tp/Tn_1/Tn_2: 12420/177/459
epoch:15, batch_id:150, loss:0.0369,             acc:0.9553 Tp/Tn_1/Tn_2: 36929/493/1234
epoch:15, batch_id:200, loss:0.0332,             acc:0.9550 Tp/Tn_1/Tn_2: 49139/669/1648
Eval of epoch 15 => acc:0.9368, loss:0.0310
Epoch 16: CosineAnnealingDecay set learning rate to 0.0006545084971874736.
2023-03-14 08:32:57 || Epoch 16 start:
epoch:16, batch_id:0, loss:0.0336,             acc:0.9453 Tp/Tn_1/Tn_2: 242/5/9
epoch:16, batch_id:50, loss:0.0234,             acc:0.9565 Tp/Tn_1/Tn_2: 12488/162/406
epoch:16, batch_id:100, loss:0.0728,             acc:0.9547 Tp/Tn_1/Tn_2: 24686/361/809
epoch:16, batch_id:150, loss:0.0279,             acc:0.9549 Tp/Tn_1/Tn_2: 36912/526/1218
epoch:16, batch_id:200, loss:0.0272,             acc:0.9548 Tp/Tn_1/Tn_2: 49131/691/1634
Eval of epoch 16 => acc:0.9381, loss:0.0312
Saved best model of epoch16, acc 0.9381, save path "./runs"
Epoch 17: CosineAnnealingDecay set learning rate to 0.0006167226819279527.
2023-03-14 08:35:12 || Epoch 17 start:
epoch:17, batch_id:0, loss:0.0267,             acc:0.9531 Tp/Tn_1/Tn_2: 244/5/7
epoch:17, batch_id:50, loss:0.0416,             acc:0.9578 Tp/Tn_1/Tn_2: 12505/163/388
epoch:17, batch_id:100, loss:0.0286,             acc:0.9579 Tp/Tn_1/Tn_2: 24768/328/760
epoch:17, batch_id:150, loss:0.0115,             acc:0.9567 Tp/Tn_1/Tn_2: 36984/508/1164
epoch:17, batch_id:200, loss:0.0334,             acc:0.9565 Tp/Tn_1/Tn_2: 49219/664/1573
Eval of epoch 17 => acc:0.9433, loss:0.0264
Saved best model of epoch17, acc 0.9433, save path "./runs"
Epoch 18: CosineAnnealingDecay set learning rate to 0.0005782172325201154.
2023-03-14 08:37:27 || Epoch 18 start:
epoch:18, batch_id:0, loss:0.0202,             acc:0.9648 Tp/Tn_1/Tn_2: 247/4/5
epoch:18, batch_id:50, loss:0.0210,             acc:0.9574 Tp/Tn_1/Tn_2: 12500/175/381
epoch:18, batch_id:100, loss:0.0344,             acc:0.9575 Tp/Tn_1/Tn_2: 24757/331/768
epoch:18, batch_id:150, loss:0.0305,             acc:0.9570 Tp/Tn_1/Tn_2: 36994/495/1167
epoch:18, batch_id:200, loss:0.0172,             acc:0.9574 Tp/Tn_1/Tn_2: 49262/660/1534
Eval of epoch 18 => acc:0.9417, loss:0.0248
Epoch 19: CosineAnnealingDecay set learning rate to 0.0005392295478639224.
2023-03-14 08:39:43 || Epoch 19 start:
epoch:19, batch_id:0, loss:0.0388,             acc:0.9414 Tp/Tn_1/Tn_2: 241/3/12
epoch:19, batch_id:50, loss:0.0152,             acc:0.9599 Tp/Tn_1/Tn_2: 12532/140/384
epoch:19, batch_id:100, loss:0.0576,             acc:0.9594 Tp/Tn_1/Tn_2: 24805/308/743
epoch:19, batch_id:150, loss:0.0427,             acc:0.9595 Tp/Tn_1/Tn_2: 37092/442/1122
epoch:19, batch_id:200, loss:0.0163,             acc:0.9599 Tp/Tn_1/Tn_2: 49392/590/1474
Eval of epoch 19 => acc:0.9426, loss:0.0260
Epoch 20: CosineAnnealingDecay set learning rate to 0.0004999999999999999.
2023-03-14 08:42:01 || Epoch 20 start:
epoch:20, batch_id:0, loss:0.0304,             acc:0.9492 Tp/Tn_1/Tn_2: 243/4/9
epoch:20, batch_id:50, loss:0.0252,             acc:0.9619 Tp/Tn_1/Tn_2: 12559/143/354
epoch:20, batch_id:100, loss:0.0164,             acc:0.9627 Tp/Tn_1/Tn_2: 24892/265/699
epoch:20, batch_id:150, loss:0.0241,             acc:0.9616 Tp/Tn_1/Tn_2: 37173/435/1048
epoch:20, batch_id:200, loss:0.0401,             acc:0.9612 Tp/Tn_1/Tn_2: 49462/575/1419
Saved log epoch-20
Eval of epoch 20 => acc:0.9447, loss:0.0294
Saved best model of epoch20, acc 0.9447, save path "./runs"
Epoch 21: CosineAnnealingDecay set learning rate to 0.0004607704521360775.
2023-03-14 08:44:22 || Epoch 21 start:
epoch:21, batch_id:0, loss:0.0155,             acc:0.9688 Tp/Tn_1/Tn_2: 248/2/6
epoch:21, batch_id:50, loss:0.0132,             acc:0.9642 Tp/Tn_1/Tn_2: 12588/153/315
epoch:21, batch_id:100, loss:0.0288,             acc:0.9621 Tp/Tn_1/Tn_2: 24877/284/695
epoch:21, batch_id:150, loss:0.0277,             acc:0.9615 Tp/Tn_1/Tn_2: 37168/442/1046
epoch:21, batch_id:200, loss:0.0210,             acc:0.9620 Tp/Tn_1/Tn_2: 49503/582/1371
Eval of epoch 21 => acc:0.9463, loss:0.0292
Saved best model of epoch21, acc 0.9463, save path "./runs"
Epoch 22: CosineAnnealingDecay set learning rate to 0.0004217827674798846.
2023-03-14 08:46:38 || Epoch 22 start:
epoch:22, batch_id:0, loss:0.0143,             acc:0.9727 Tp/Tn_1/Tn_2: 249/2/5
epoch:22, batch_id:50, loss:0.0343,             acc:0.9646 Tp/Tn_1/Tn_2: 12594/133/329
epoch:22, batch_id:100, loss:0.0341,             acc:0.9646 Tp/Tn_1/Tn_2: 24941/259/656
epoch:22, batch_id:150, loss:0.0310,             acc:0.9640 Tp/Tn_1/Tn_2: 37264/415/977
epoch:22, batch_id:200, loss:0.0179,             acc:0.9639 Tp/Tn_1/Tn_2: 49601/538/1317
Eval of epoch 22 => acc:0.9495, loss:0.0232
Saved best model of epoch22, acc 0.9495, save path "./runs"
Epoch 23: CosineAnnealingDecay set learning rate to 0.0003832773180720472.
2023-03-14 08:48:52 || Epoch 23 start:
epoch:23, batch_id:0, loss:0.0235,             acc:0.9492 Tp/Tn_1/Tn_2: 243/3/10
epoch:23, batch_id:50, loss:0.0131,             acc:0.9656 Tp/Tn_1/Tn_2: 12607/130/319
epoch:23, batch_id:100, loss:0.0148,             acc:0.9656 Tp/Tn_1/Tn_2: 24967/246/643
epoch:23, batch_id:150, loss:0.0247,             acc:0.9649 Tp/Tn_1/Tn_2: 37301/393/962
epoch:23, batch_id:200, loss:0.0123,             acc:0.9645 Tp/Tn_1/Tn_2: 49628/525/1303
Eval of epoch 23 => acc:0.9488, loss:0.0246
Epoch 24: CosineAnnealingDecay set learning rate to 0.0003454915028125262.
2023-03-14 08:51:08 || Epoch 24 start:
epoch:24, batch_id:0, loss:0.0378,             acc:0.9531 Tp/Tn_1/Tn_2: 244/5/7
epoch:24, batch_id:50, loss:0.0296,             acc:0.9640 Tp/Tn_1/Tn_2: 12586/139/331
epoch:24, batch_id:100, loss:0.0256,             acc:0.9651 Tp/Tn_1/Tn_2: 24953/243/660
epoch:24, batch_id:150, loss:0.0261,             acc:0.9657 Tp/Tn_1/Tn_2: 37332/366/958
epoch:24, batch_id:200, loss:0.0256,             acc:0.9653 Tp/Tn_1/Tn_2: 49669/501/1286
Eval of epoch 24 => acc:0.9490, loss:0.0232
Epoch 25: CosineAnnealingDecay set learning rate to 0.00030865828381745506.
2023-03-14 08:53:31 || Epoch 25 start:
epoch:25, batch_id:0, loss:0.0302,             acc:0.9570 Tp/Tn_1/Tn_2: 245/6/5
epoch:25, batch_id:50, loss:0.0121,             acc:0.9678 Tp/Tn_1/Tn_2: 12635/125/296
epoch:25, batch_id:100, loss:0.0191,             acc:0.9664 Tp/Tn_1/Tn_2: 24986/248/622
epoch:25, batch_id:150, loss:0.0185,             acc:0.9669 Tp/Tn_1/Tn_2: 37375/352/929
epoch:25, batch_id:200, loss:0.0236,             acc:0.9666 Tp/Tn_1/Tn_2: 49735/481/1240
Eval of epoch 25 => acc:0.9480, loss:0.0251
Epoch 26: CosineAnnealingDecay set learning rate to 0.0002730047501302265.
2023-03-14 08:55:47 || Epoch 26 start:
epoch:26, batch_id:0, loss:0.0151,             acc:0.9648 Tp/Tn_1/Tn_2: 247/4/5
epoch:26, batch_id:50, loss:0.0103,             acc:0.9663 Tp/Tn_1/Tn_2: 12616/124/316
epoch:26, batch_id:100, loss:0.0193,             acc:0.9657 Tp/Tn_1/Tn_2: 24970/239/647
epoch:26, batch_id:150, loss:0.0229,             acc:0.9657 Tp/Tn_1/Tn_2: 37330/375/951
epoch:26, batch_id:200, loss:0.0194,             acc:0.9659 Tp/Tn_1/Tn_2: 49703/506/1247
Eval of epoch 26 => acc:0.9503, loss:0.0255
Saved best model of epoch26, acc 0.9503, save path "./runs"
Epoch 27: CosineAnnealingDecay set learning rate to 0.0002387507176420255.
2023-03-14 08:58:03 || Epoch 27 start:
epoch:27, batch_id:0, loss:0.0107,             acc:0.9766 Tp/Tn_1/Tn_2: 250/2/4
epoch:27, batch_id:50, loss:0.0247,             acc:0.9656 Tp/Tn_1/Tn_2: 12607/125/324
epoch:27, batch_id:100, loss:0.0139,             acc:0.9667 Tp/Tn_1/Tn_2: 24995/245/616
epoch:27, batch_id:150, loss:0.0154,             acc:0.9663 Tp/Tn_1/Tn_2: 37353/368/935
epoch:27, batch_id:200, loss:0.0130,             acc:0.9664 Tp/Tn_1/Tn_2: 49727/487/1242
Eval of epoch 27 => acc:0.9501, loss:0.0248
Epoch 28: CosineAnnealingDecay set learning rate to 0.0002061073738537634.
2023-03-14 09:00:25 || Epoch 28 start:
epoch:28, batch_id:0, loss:0.0237,             acc:0.9766 Tp/Tn_1/Tn_2: 250/3/3
epoch:28, batch_id:50, loss:0.0182,             acc:0.9688 Tp/Tn_1/Tn_2: 12648/117/291
epoch:28, batch_id:100, loss:0.0199,             acc:0.9677 Tp/Tn_1/Tn_2: 25020/238/598
epoch:28, batch_id:150, loss:0.0138,             acc:0.9683 Tp/Tn_1/Tn_2: 37430/342/884
epoch:28, batch_id:200, loss:0.0227,             acc:0.9676 Tp/Tn_1/Tn_2: 49791/471/1194
Eval of epoch 28 => acc:0.9512, loss:0.0237
Saved best model of epoch28, acc 0.9512, save path "./runs"
Epoch 29: CosineAnnealingDecay set learning rate to 0.00017527597583490815.
2023-03-14 09:02:42 || Epoch 29 start:
epoch:29, batch_id:0, loss:0.0097,             acc:0.9805 Tp/Tn_1/Tn_2: 251/1/4
epoch:29, batch_id:50, loss:0.0237,             acc:0.9668 Tp/Tn_1/Tn_2: 12622/112/322
epoch:29, batch_id:100, loss:0.0100,             acc:0.9691 Tp/Tn_1/Tn_2: 25058/205/593
epoch:29, batch_id:150, loss:0.0204,             acc:0.9692 Tp/Tn_1/Tn_2: 37464/318/874
epoch:29, batch_id:200, loss:0.0281,             acc:0.9692 Tp/Tn_1/Tn_2: 49871/440/1145
Eval of epoch 29 => acc:0.9509, loss:0.0236
Epoch 30: CosineAnnealingDecay set learning rate to 0.0001464466094067262.
2023-03-14 09:04:58 || Epoch 30 start:
epoch:30, batch_id:0, loss:0.0299,             acc:0.9453 Tp/Tn_1/Tn_2: 242/4/10
epoch:30, batch_id:50, loss:0.0218,             acc:0.9677 Tp/Tn_1/Tn_2: 12634/125/297
epoch:30, batch_id:100, loss:0.0141,             acc:0.9683 Tp/Tn_1/Tn_2: 25036/236/584
epoch:30, batch_id:150, loss:0.0326,             acc:0.9684 Tp/Tn_1/Tn_2: 37433/345/878
epoch:30, batch_id:200, loss:0.0217,             acc:0.9688 Tp/Tn_1/Tn_2: 49853/442/1161
Eval of epoch 30 => acc:0.9515, loss:0.0228
Saved best model of epoch30, acc 0.9515, save path "./runs"
Epoch 31: CosineAnnealingDecay set learning rate to 0.00011979701719998447.
2023-03-14 09:07:14 || Epoch 31 start:
epoch:31, batch_id:0, loss:0.0116,             acc:0.9727 Tp/Tn_1/Tn_2: 249/2/5
epoch:31, batch_id:50, loss:0.0157,             acc:0.9681 Tp/Tn_1/Tn_2: 12640/129/287
epoch:31, batch_id:100, loss:0.0147,             acc:0.9688 Tp/Tn_1/Tn_2: 25048/245/563
epoch:31, batch_id:150, loss:0.0310,             acc:0.9680 Tp/Tn_1/Tn_2: 37420/357/879
epoch:31, batch_id:200, loss:0.0162,             acc:0.9689 Tp/Tn_1/Tn_2: 49857/452/1147
Eval of epoch 31 => acc:0.9514, loss:0.0249
Epoch 32: CosineAnnealingDecay set learning rate to 9.549150281252627e-05.
2023-03-14 09:09:29 || Epoch 32 start:
epoch:32, batch_id:0, loss:0.0365,             acc:0.9414 Tp/Tn_1/Tn_2: 241/4/11
epoch:32, batch_id:50, loss:0.0192,             acc:0.9684 Tp/Tn_1/Tn_2: 12644/117/295
epoch:32, batch_id:100, loss:0.0156,             acc:0.9693 Tp/Tn_1/Tn_2: 25062/227/567
epoch:32, batch_id:150, loss:0.0100,             acc:0.9706 Tp/Tn_1/Tn_2: 37519/325/812
epoch:32, batch_id:200, loss:0.0324,             acc:0.9696 Tp/Tn_1/Tn_2: 49893/449/1114
Eval of epoch 32 => acc:0.9520, loss:0.0249
Saved best model of epoch32, acc 0.9520, save path "./runs"
Epoch 33: CosineAnnealingDecay set learning rate to 7.367991782295387e-05.
2023-03-14 09:11:45 || Epoch 33 start:
epoch:33, batch_id:0, loss:0.0107,             acc:0.9766 Tp/Tn_1/Tn_2: 250/1/5
epoch:33, batch_id:50, loss:0.0151,             acc:0.9706 Tp/Tn_1/Tn_2: 12672/106/278
epoch:33, batch_id:100, loss:0.0129,             acc:0.9701 Tp/Tn_1/Tn_2: 25084/216/556
epoch:33, batch_id:150, loss:0.0274,             acc:0.9701 Tp/Tn_1/Tn_2: 37499/325/832
epoch:33, batch_id:200, loss:0.0208,             acc:0.9697 Tp/Tn_1/Tn_2: 49898/440/1118
Eval of epoch 33 => acc:0.9514, loss:0.0231
Epoch 34: CosineAnnealingDecay set learning rate to 5.4496737905816076e-05.
2023-03-14 09:14:01 || Epoch 34 start:
epoch:34, batch_id:0, loss:0.0155,             acc:0.9570 Tp/Tn_1/Tn_2: 245/6/5
epoch:34, batch_id:50, loss:0.0237,             acc:0.9697 Tp/Tn_1/Tn_2: 12660/101/295
epoch:34, batch_id:100, loss:0.0227,             acc:0.9708 Tp/Tn_1/Tn_2: 25102/205/549
epoch:34, batch_id:150, loss:0.0494,             acc:0.9707 Tp/Tn_1/Tn_2: 37523/300/833
epoch:34, batch_id:200, loss:0.0179,             acc:0.9710 Tp/Tn_1/Tn_2: 49963/402/1091
Eval of epoch 34 => acc:0.9517, loss:0.0232
Epoch 35: CosineAnnealingDecay set learning rate to 3.806023374435661e-05.
2023-03-14 09:16:17 || Epoch 35 start:
epoch:35, batch_id:0, loss:0.0117,             acc:0.9688 Tp/Tn_1/Tn_2: 248/4/4
epoch:35, batch_id:50, loss:0.0140,             acc:0.9710 Tp/Tn_1/Tn_2: 12677/98/281
epoch:35, batch_id:100, loss:0.0220,             acc:0.9712 Tp/Tn_1/Tn_2: 25112/211/533
epoch:35, batch_id:150, loss:0.0151,             acc:0.9701 Tp/Tn_1/Tn_2: 37500/337/819
epoch:35, batch_id:200, loss:0.0086,             acc:0.9700 Tp/Tn_1/Tn_2: 49914/443/1099
Eval of epoch 35 => acc:0.9522, loss:0.0236
Saved best model of epoch35, acc 0.9522, save path "./runs"
Epoch 36: CosineAnnealingDecay set learning rate to 2.447174185242322e-05.
2023-03-14 09:18:34 || Epoch 36 start:
epoch:36, batch_id:0, loss:0.0160,             acc:0.9727 Tp/Tn_1/Tn_2: 249/3/4
epoch:36, batch_id:50, loss:0.0150,             acc:0.9704 Tp/Tn_1/Tn_2: 12669/106/281
epoch:36, batch_id:100, loss:0.0269,             acc:0.9705 Tp/Tn_1/Tn_2: 25094/217/545
epoch:36, batch_id:150, loss:0.0131,             acc:0.9707 Tp/Tn_1/Tn_2: 37522/325/809
epoch:36, batch_id:200, loss:0.0088,             acc:0.9702 Tp/Tn_1/Tn_2: 49921/446/1089
Eval of epoch 36 => acc:0.9519, loss:0.0239
Epoch 37: CosineAnnealingDecay set learning rate to 1.3815039801161714e-05.
2023-03-14 09:20:51 || Epoch 37 start:
epoch:37, batch_id:0, loss:0.0164,             acc:0.9766 Tp/Tn_1/Tn_2: 250/2/4
epoch:37, batch_id:50, loss:0.0177,             acc:0.9702 Tp/Tn_1/Tn_2: 12667/103/286
epoch:37, batch_id:100, loss:0.0291,             acc:0.9703 Tp/Tn_1/Tn_2: 25087/206/563
epoch:37, batch_id:150, loss:0.0169,             acc:0.9703 Tp/Tn_1/Tn_2: 37508/318/830
epoch:37, batch_id:200, loss:0.0108,             acc:0.9705 Tp/Tn_1/Tn_2: 49937/424/1095
Eval of epoch 37 => acc:0.9515, loss:0.0237
Epoch 38: CosineAnnealingDecay set learning rate to 6.155829702431168e-06.
2023-03-14 09:23:10 || Epoch 38 start:
epoch:38, batch_id:0, loss:0.0203,             acc:0.9727 Tp/Tn_1/Tn_2: 249/3/4
epoch:38, batch_id:50, loss:0.0175,             acc:0.9707 Tp/Tn_1/Tn_2: 12673/117/266
epoch:38, batch_id:100, loss:0.0273,             acc:0.9706 Tp/Tn_1/Tn_2: 25096/220/540
epoch:38, batch_id:150, loss:0.0321,             acc:0.9711 Tp/Tn_1/Tn_2: 37537/318/801
epoch:38, batch_id:200, loss:0.0148,             acc:0.9710 Tp/Tn_1/Tn_2: 49964/428/1064
Eval of epoch 38 => acc:0.9519, loss:0.0235
Epoch 39: CosineAnnealingDecay set learning rate to 1.5413331334360176e-06.
2023-03-14 09:25:33 || Epoch 39 start:
epoch:39, batch_id:0, loss:0.0371,             acc:0.9492 Tp/Tn_1/Tn_2: 243/4/9
epoch:39, batch_id:50, loss:0.0176,             acc:0.9675 Tp/Tn_1/Tn_2: 12632/100/324
epoch:39, batch_id:100, loss:0.0243,             acc:0.9704 Tp/Tn_1/Tn_2: 25090/200/566
epoch:39, batch_id:150, loss:0.0105,             acc:0.9702 Tp/Tn_1/Tn_2: 37504/301/851
epoch:39, batch_id:200, loss:0.0256,             acc:0.9702 Tp/Tn_1/Tn_2: 49925/404/1127
Eval of epoch 39 => acc:0.9523, loss:0.0236
Saved best model of epoch39, acc 0.9523, save path "./runs"
Epoch 40: CosineAnnealingDecay set learning rate to 0.0.

Validation and testing

When the batch size is changed from 256 to 1, the validation accuracy drops from 95.23% to 93.96%, most likely because of the following code in the network

# line 103
f_pow = paddle.pow(f, 2)
f_mean = paddle.mean(f_pow)
f = paddle.divide(f, f_mean)

Here paddle.mean averages over the entire batch tensor, so the normalization is coupled with the batch. It can be decoupled by restricting the mean to per-sample dimensions, for example:

f_mean = paddle.mean(f_pow, axis=[1,2,3], keepdim=True)
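
To make the coupling concrete, here is a small sketch (not from the original notebook) comparing the current global mean with the per-sample mean suggested above; with the per-sample version, a sample's output no longer depends on the other samples in the batch:

import paddle

paddle.seed(0)
batch = paddle.randn([4, 64, 4, 18])
single = batch[0:1]

def normalize_global(f):
    # current behaviour: mean over the entire batch tensor
    return f / paddle.mean(paddle.pow(f, 2))

def normalize_per_sample(f):
    # suggested fix: mean over C/H/W only, keeping the batch dimension
    return f / paddle.mean(paddle.pow(f, 2), axis=[1, 2, 3], keepdim=True)

print(paddle.allclose(normalize_global(batch)[0:1], normalize_global(single)))          # expected: False
print(paddle.allclose(normalize_per_sample(batch)[0:1], normalize_per_sample(single)))  # expected: True
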
Validation
import paddle.vision.transforms as T
from paddle.io import DataLoader
import time
from statistics import mean

# Parameter definitions
IMGSIZE = (94, 24)
IMGDIR = 'rec_images/data'
VALIDFILE = 'rec_images/valid.txt'
LPRMAXLEN = 18
EVALBATCHSIZE = 1
NUMWORKERS = 2

# Image preprocessing
eval_transforms = T.Compose([    
            T.ToTensor(data_format='CHW'), 
            T.Normalize(
                [0.5, 0.5, 0.5],  # ToTensor has already scaled pixel values to 0-1
                [0.5, 0.5, 0.5],
                data_format='CHW' 
            ), 
        ])

# Data loading
eval_data_set = LprnetDataloader(IMGDIR, VALIDFILE, eval_transforms)
eval_loader = DataLoader(
    eval_data_set, 
    batch_size=EVALBATCHSIZE, 
    shuffle=False, 
    num_workers=NUMWORKERS, 
    drop_last=False, 
    collate_fn=collate_fn
)

# Define the loss
loss_func = nn.CTCLoss(len(CHARS)-1)

# input_length, required by the CTC loss (sized to the eval batch)
input_length = np.ones(shape=EVALBATCHSIZE) * LPRMAXLEN
input_length = paddle.to_tensor(input_length, dtype='int64')

# LPRNet network, load the trained weights
model = LPRNet(LPRMAXLEN, len(CHARS), DROPOUT)
load_pretrained(model, 'runs/lprnet_best_2')  

# acc
acc_eval = ACC()

# Evaluation
with paddle.no_grad():
    model.eval()
    loss_list = []
    for batch_id, bath_data in enumerate(eval_loader):
        img_data, label_data, label_lens = bath_data
        predict = model(img_data)
        logits = paddle.transpose(predict, (2,0,1))
        loss = loss_func(logits, label_data, input_length, label_lens)
        acc_eval.batch_update(label_data, label_lens, predict)
        loss_list.append(loss.item())
    print(f'Eval from {VALIDFILE} => acc:{acc_eval.acc:.4f}, loss:{mean(loss_list):.4f}')
    acc_eval.clear()

params loading...
load runs/lprnet_best_2.pdparams success...
Eval from rec_images/valid.txt => acc:0.9396, loss:0.0580
Prediction visualization

Prediction here still runs in dynamic-graph mode; for deployment, converting to a static graph is recommended:

https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/jit/index_cn.html
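
As a rough sketch of that conversion (not part of the original notebook; the output path 'save_static/lprnet' is just an example), the trained dynamic-graph model could be saved as a static graph with paddle.jit.save:

import paddle
from paddle.static import InputSpec

# 'model' is assumed to be the trained LPRNet instance with weights already loaded
x_spec = InputSpec(shape=[1, 3, 24, 94], dtype='float32', name='image')
paddle.jit.save(model, 'save_static/lprnet', input_spec=[x_spec])  # writes .pdmodel / .pdiparams

# The static graph can later be reloaded with paddle.jit.load('save_static/lprnet')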

"""此部分为测试的可视化代码, 后处理可参考"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
import paddle
%matplotlib inline

img_path = 'rec_images/data/皖AF358Z.jpg'
img_data = cv2.imread(img_path)
img_data = img_data[:,:,::-1]  # BGR to RGB
plt.imshow(img_data)
plt.axis('off')
plt.show()

# Data preprocessing
img_data = cv2.resize(img_data,(94, 24))
img_data = (img_data - 127.5) / 127.5  # normalize
img_data = np.transpose(img_data, (2,0,1))  # HWC to CHW
img_data = np.expand_dims(img_data, 0)  # to BCHW
img_tensor = paddle.to_tensor(img_data, dtype='float32')  # shape == [1, 3, 24, 94]
print(img_tensor.shape)

# Load the model and run prediction
CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]
LPRMAXLEN = 18
model = LPRNet(LPRMAXLEN, len(CHARS), dropout_rate=0)
load_pretrained(model, 'runs/lprnet_best_2')
out_data = model(img_tensor)  # out_data.shape == [1, 68, 18]

# Post-processing for a single image
def reprocess(pred):
    pred_data = pred[0]
    pred_label = np.argmax(pred_data, axis=0)
    no_repeat_blank_label = []
    pre_c = pred_label[0]
    if pre_c != len(CHARS) - 1:  # not blank
        no_repeat_blank_label.append(pre_c)
    for c in pred_label:  # drop repeated labels and blank labels
        if (pre_c == c) or (c == len(CHARS) - 1):
            if c == len(CHARS) - 1:
                pre_c = c
            continue
        no_repeat_blank_label.append(c)
        pre_c = c
    char_list = [CHARS[i] for i in no_repeat_blank_label]
    return ''.join(char_list)

rep_result = reprocess(out_data)
print(rep_result)  # 皖AF358Z

(output: the cropped plate image 皖AF358Z is shown here)

[1, 3, 24, 94]
params loading...
load runs/lprnet_best_2.pdparams success...
皖AF358Z

TODO

  1. The dataset distribution is unbalanced: only the blue plates from CCPD2019 are used here; the green-plate data in CCPD2020 could be added.

  2. Most plates start with "皖" (Anhui); plate data from other provinces should be added.

  3. The plate crops are not rectified; adding a plate rectification step should help accuracy (see the sketch after this list).

  4. The recognition network is coupled with the batch statistics; it could be decoupled and retrained.

  5. The network fixes the output sequence length at 18; making it configurable would let the model fit more scenarios.
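
For item 3, a hedged starting point (not from the original project) is to rectify the plate with OpenCV using the four vertex coordinates that the CCPD filename already carries (the 'points' field parsed but unused in the dataset-construction script). The vertex ordering below is an assumption and should be checked against the CCPD documentation:

import cv2
import numpy as np

def rectify_plate(img, points_str, out_w=94, out_h=24):
    """Warp a plate crop to a fronto-parallel view.

    points_str is the 'points' field of a CCPD filename, e.g. 'x1&y1_x2&y2_x3&y3_x4&y4'.
    Assumption: the vertices are listed bottom-right, bottom-left, top-left, top-right.
    """
    pts = np.array([p.split('&') for p in points_str.split('_')], dtype=np.float32)
    br, bl, tl, tr = pts
    src = np.array([tl, tr, br, bl], dtype=np.float32)  # reorder to tl, tr, br, bl
    dst = np.array([[0, 0], [out_w - 1, 0], [out_w - 1, out_h - 1], [0, out_h - 1]], dtype=np.float32)
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (out_w, out_h))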

Model export

Export to ONNX

The dynamic-graph model is exported to an ONNX file directly with the paddle.onnx.export API.

https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/onnx/export_cn.html


model = LPRNet(18, 68, dropout_rate=0)
load_pretrained(model, 'runs/lprnet_best_2')

save_path = 'save_onnx/lprnet' # path where the ONNX model will be saved
x_spec = paddle.static.InputSpec([1, 3, 24, 94], 'float32', 'image') 
paddle.onnx.export(model, save_path, input_spec=[x_spec], opset_version=11)
params loading...
load runs/lprnet_best_2.pdparams success...


/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py:712: UserWarning: When training, we now always track global mean and variance.
  "When training, we now always track global mean and variance."


2023-03-14 11:50:25 [INFO]	Static PaddlePaddle model saved in save_onnx/paddle_model_static_onnx_temp_dir.
[Paddle2ONNX] Start to parse PaddlePaddle model...
[Paddle2ONNX] Model file path: save_onnx/paddle_model_static_onnx_temp_dir/model.pdmodel
[Paddle2ONNX] Paramters file path: save_onnx/paddle_model_static_onnx_temp_dir/model.pdiparams
[Paddle2ONNX] Start to parsing Paddle model...
[Paddle2ONNX] Use opset_version = 11 for ONNX export.
[Paddle2ONNX] PaddlePaddle model is exported as ONNX format now.
2023-03-14 11:50:25 [INFO]	ONNX model saved in save_onnx/lprnet.onnx.
Testing the ONNX model

The exported network structure can be visualized at https://netron.app/

More references: https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/advanced/model_to_onnx_cn.html

!pip install onnx==1.10.2
!pip install onnxruntime==1.9.0
"""检查onnx是否合理,模型的版本、图的结构、节点及其输入和输出"""

import onnx
onnx_model = onnx.load("save_onnx/lprnet.onnx")
check = onnx.checker.check_model(onnx_model)
print('check: ', check)
# Import required libraries
import numpy as np
import onnxruntime
import paddle

# Generate a random input to check that Paddle and ONNX inference results match
x = np.random.random((1, 3, 24, 94)).astype('float32')

# predict by ONNXRuntime
onnx_path = "save_onnx/lprnet.onnx"
ort_sess = onnxruntime.InferenceSession(onnx_path)
ort_inputs = {ort_sess.get_inputs()[0].name: x}
ort_outs = ort_sess.run(None, ort_inputs)

print("Exported model has been predicted by ONNXRuntime!")

# predict by Paddle
model = paddle.jit.load("save_onnx/paddle_model_static_onnx_temp_dir/model")  # the ONNX export step above also saves static-graph files to this directory
model.eval()
paddle_input = paddle.to_tensor(x)
paddle_outs = model(paddle_input)

print("Original model has been predicted by Paddle!")

# compare ONNXRuntime and Paddle results
np.testing.assert_allclose(ort_outs[0], paddle_outs.numpy(), rtol=1.0, atol=1e-05)

print("The difference of results between ONNXRuntime and Paddle looks good!")
Exported model has been predicted by ONNXRuntime!


W0314 12:10:11.987738 21149 gpu_resources.cc:61] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.2, Runtime API Version: 11.2
W0314 12:10:11.993809 21149 gpu_resources.cc:91] device: 0, cuDNN Version: 8.2.


Original model has been predicted by Paddle!
The difference of results between ONNXRuntime and Paddle looks good!
ONNX inference

Inference is the same as above, with the real data preprocessing and output post-processing added.

import onnxruntime
import cv2
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline

# Data preprocessing
img_path = 'rec_images/data/皖AF358Z.jpg'
img_data = cv2.imread(img_path)
img_data = img_data[:,:,::-1]  # BGR to RGB
plt.imshow(img_data)
plt.axis('off')
plt.show()

img_data = cv2.resize(img_data,(94, 24))
img_data = (img_data - 127.5) / 127.5  # normalize
img_data = np.transpose(img_data, (2,0,1))  # HWC to CHW
img_data = np.expand_dims(img_data, 0)  # to BCHW
np_data = np.array(img_data, dtype=np.float32)

# Load the ONNX model and create an inference session
onnx_path = "save_onnx/lprnet.onnx"
sess = onnxruntime.InferenceSession(onnx_path)

# Run inference with ONNXRuntime
ort_inputs = {sess.get_inputs()[0].name: np_data}
result, = sess.run(None, ort_inputs)

# Post-process the inference result
CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]
def reprocess(pred):
    pred_data = pred[0]
    pred_label = np.argmax(pred_data, axis=0)
    no_repeat_blank_label = []
    pre_c = pred_label[0]
    if pre_c != len(CHARS) - 1:  # not blank
        no_repeat_blank_label.append(pre_c)
    for c in pred_label:  # drop repeated labels and blank labels
        if (pre_c == c) or (c == len(CHARS) - 1):
            if c == len(CHARS) - 1:
                pre_c = c
            continue
        no_repeat_blank_label.append(c)
        pre_c = c
    char_list = [CHARS[i] for i in no_repeat_blank_label]
    return ''.join(char_list)

plate_str = reprocess(result)
print(plate_str)  # 皖AF358Z

(output: the cropped plate image, main_files/main_34_0.png)

皖AF358Z
