Running the model on some data fails with: OSError: SavedModel file does not exist at: on_object_test.h5\{saved_model.pbtxt|saved_model.pb}

Could someone take a look at what is going wrong? It has been a real headache. The source code is below.

Dataset: Pascal VOC Dataset Mirror

import os
import sys
import xml.etree.ElementTree as ET
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TF log messages below level 2
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Reshape, Concatenate, \
    concatenate, ZeroPadding2D, Convolution2D, BatchNormalization, Activation, AveragePooling2D, Add
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.losses import categorical_crossentropy, binary_crossentropy, mean_squared_error
from tensorflow.keras.initializers import glorot_uniform
import numpy as np

batch_size=32
input_size=224
# https://juejin.cn/post/6844903908570054670
xmls_path='E:\\VOCdevkit\\VOC2007\\Annotations'
imgs_path='E:\\VOCdevkit\\VOC2007\\JPEGImages'
catalogs=['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
# region Preprocess images and labels; a generator drives the whole pipeline
def generator_data():
    global batch_size
    annotations = os.listdir(xmls_path)
    # shuffle the annotation order
    np.random.shuffle(annotations)
    images=[]
    classes=[]
    labels=[]
    while True:
        for anno in annotations:
            anno_path = os.path.join(xmls_path,anno)
            tree = ET.parse(anno_path)
            root = tree.getroot()
            # image file name
            img_name = root.find('filename').text
            width = int(root.find('size/width').text)
            height = int(root.find('size/height').text)
            obj_name = root.find('object/name').text
            xmin = int(root.find('object/bndbox/xmin').text)
            ymin = int(root.find('object/bndbox/ymin').text)
            xmax = int(root.find('object/bndbox/xmax').text)
            ymax = int(root.find('object/bndbox/ymax').text)

            label = [xmin,ymin,xmax,ymax]
            # size=[width,height]
            # x1, y1, x2, y2 = label
            # if y1 >= y2:
            #     print(anno_path, label)
            #     break
            img_path = os.path.join(imgs_path, img_name)
            if os.path.exists(img_path):
                image = cv2.imread(img_path)
                image , label = image_plus(image,label)

                # resize the image to input_size x input_size
                image = cv2.resize(image,(input_size,input_size))
                label = fix_label_scale(label,[height,width])
                label = convert_to_mse(label)
                obj_catalog=np.zeros(dtype=float,shape=len(catalogs))
                obj_catalog_idx=catalogs.index(obj_name)
                obj_catalog[obj_catalog_idx]=1

                classes.append(obj_catalog)
                images.append(image)
                labels.append(label)


                if(len(images)>=batch_size):
                    yield (np.array(images),{'class_head':np.array(classes), 'reg_head':np.array(labels)})
                    images= []
                    labels=[]
                    classes=[]
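
# Usage sketch (my addition, not in the original): pull one batch from the generator to
# inspect shapes; assumes the VOC paths above exist on disk.
# imgs, targets = next(generator_data())
# print(imgs.shape)                    # (32, 224, 224, 3)
# print(targets['class_head'].shape)   # (32, 20)
# print(targets['reg_head'].shape)     # (32, 4)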

def generator_valid_data():
    global batch_size
    annotations = os.listdir(xmls_path)
    # shuffle the annotation order
    np.random.shuffle(annotations)
    images=[]
    classes=[]
    labels=[]
    while True:
        for anno in annotations:
            anno_path = os.path.join(xmls_path,anno)
            tree = ET.parse(anno_path)
            root = tree.getroot()
            # image file name
            img_name = root.find('filename').text
            width = int(root.find('size/width').text)
            height = int(root.find('size/height').text)
            obj_name = root.find('object/name').text
            xmin = int(root.find('object/bndbox/xmin').text)
            ymin = int(root.find('object/bndbox/ymin').text)
            xmax = int(root.find('object/bndbox/xmax').text)
            ymax = int(root.find('object/bndbox/ymax').text)

            label = [xmin,ymin,xmax,ymax]
            # size=[width,height]

            img_path = os.path.join(imgs_path, img_name)
            if os.path.exists(img_path):
                image = cv2.imread(img_path)
                image , label = image_plus(image,label)

                # resize the image to input_size x input_size
                image = cv2.resize(image,(input_size,input_size))
                label = fix_label_scale(label,[height,width])
                # if label[0]>=label[2]:
                #     print('error:',label)
                #     break
                label=convert_to_mse(label)
                obj_catalog = np.zeros(dtype=float, shape=len(catalogs))
                obj_catalog_idx = catalogs.index(obj_name)
                obj_catalog[obj_catalog_idx] = 1

                classes.append(obj_catalog)
                images.append(image)
                labels.append(label)

                if(len(images)>=batch_size*10):
                    return (np.array(images),{'class_head':np.array(classes), 'reg_head':np.array(labels)})

# Convert x,y,w,h to x1,y1,x2,y2
# (x, y) is the box centre
def convert_to_point(bbox):
    x,y,w,h = bbox
    return [
        x-(w/2),
        y-(h/2),
        x + (w / 2),
        y + (h / 2),
    ]

# Convert x1,y1,x2,y2 to x,y,w,h
def convert_to_mse(bbox):
    x1,y1,x2,y2=bbox
    return [(x2+x1)/2,
            (y2+y1)/2,
            x2-x1,
            y2-y1]
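
# Round-trip check (my addition): convert_to_mse and convert_to_point are inverses, so a
# corner-format box survives the round trip.
# box = [10, 20, 110, 220]   # x1, y1, x2, y2
# assert convert_to_point(convert_to_mse(box)) == [10.0, 20.0, 110.0, 220.0]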


# Map a box from the network-input scale back to the original image size
def ref_label_scale(label,scale):
    x1, y1, x2, y2 = label
    w,h=input_size/scale[1],input_size/scale[0]
    label = [int(round(x1 / w)),
             int(round(y1 / h)),
             int(round(x2 / w)),
             int(round(y2 / h))]
    return label

# Scale a box to the network input size (single scale factor)
def label_scale(label,scale):
    x1, y1, x2, y2 = label
    scale=input_size/scale
    label = [int(round(x1 * scale)),
             int(round(y1 * scale)),
             int(round(x2 * scale)),
             int(round(y2 * scale))]
    return label

def fix_label_scale(label,scale):
    x1, y1, x2, y2 = label
    w,h=input_size/scale[1],input_size/scale[0]
    label = [int(round(x1 * w)),
             int(round(y1 * h)),
             int(round(x2 * w)),
             int(round(y2 * h))]
    return label
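
# Example (my addition): fix_label_scale maps a box into 224x224 network coordinates and
# ref_label_scale maps it back; scale is passed as [height, width]. For a 500x375 image:
# box = [100, 75, 300, 225]
# scaled = fix_label_scale(box, [375, 500])       # box in 224x224 coordinates
# restored = ref_label_scale(scaled, [375, 500])  # roughly the original box (rounding)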

# Data augmentation
# Random scaling; the label has to be transformed along with the image
def random_scale(img, min_size,label):
    h, w, _ = img.shape
    scale = min_size / min(h, w) # scale so the short side becomes min_size
    new_w=int(round(scale*w))
    new_h=int(round(scale*h))
    x1,y1,x2,y2 =  label
    label=[int(round(x1*scale)),
           int(round(y1*scale)),
           int(round(x2*scale)),
           int(round(y2*scale))]
    img = cv2.resize(img, (new_w, new_h)) # resize the image
    return img,label

import random

# Random flip
def random_flip(img, flip_ratio,label):
    h, w, _=img.shape
    x1,y1,x2,y2=label
    if random.random() < flip_ratio: # flip horizontally with probability flip_ratio
        img = cv2.flip(img, 1) # horizontal flip
        label=[w-x2,y1,w-x1,y2]
    else:
        img = cv2.flip(img, 0) # vertical flip
        label = [x1,h-y2,x2,h-y1]
    return img,label

import math
# 随机旋转
def random_rotate(img, angle_range,label):
    h, w, _ = img.shape
    angle = np.random.uniform(-angle_range, angle_range) # 生成随机旋转角度
    mat = cv2.getRotationMatrix2D((w/2, h/2), angle, 1.0) # 旋转矩阵
    img = cv2.warpAffine(img, mat, (w, h), flags=cv2.INTER_LINEAR, borderValue=(0, 0, 0)) # 仿射变换

    x1,y1,x2,y2=label
    #边框中心点位置
    x,y =  (x2-x1)/2,(y2-y1)/2
    # 旋转中心点
    cx=w/2
    cy=h/2
    # 将标签点转换到以旋转中心为原点的坐标系下
    x -= cx
    y -=cy
    # x1 -=cx
    # y1 -=cy
    # x2 -=cx
    # y2 -=cy
    angle = angle * np.pi /180.0 # 角度转弧度
    x_new = x * math.cos(angle) + y * math.sin(angle)
    y_new = -x * math.sin(angle) + y * math.cos(angle)
    # x1_new = x1 * math.cos(angle)+y1*math.sin(angle)
    # y1_new = -x1*math.sin(angle)+y1*math.cos(angle)
    # x2_new = x2 * math.cos(angle)+y2*math.sin(angle)
    # y2_new = -x2 * math.sin(angle)+y2*math.cos(angle)

    # 转回原坐标系下的坐标
    x_new +=cx
    y_new +=cy
    # x1_new +=cx
    # y1_new+=cy
    # x2_new+=cx
    # y2_new+=cy

    # 对旋转后的坐标进行裁剪,避免越界
    x_new= max(min(x_new,w),0)
    y_new= max(min(y_new,h),0)
    # x1_new = max(min(x1_new,w),0)
    # y1_new = max(min(y1_new, h), 0)
    # x2_new = max(min(x2_new, w), 0)
    # y2_new = max(min(y2_new, h), 0)

    x1_new = x_new-(x2-x1)/2
    y1_new = y_new-(y2-y1)/2

    x2_new = x_new+(x2-x1)/2
    y2_new = y_new + (x2-x1)/2

    # x1_new = (x1_new if x1_new>0 else 0 )
    # y1_new =(y1_new if y1_new>0 else 0)
    #
    # x2_new =(x2_new if x2_new>0 and x2_new<=w else w)
    # y2_new = (y2_new if y2_new>0 and y2_new<=h else h)


    if y1_new>=y2_new:
        print('random_rotate error',[x1_new,y1_new,x2_new,y2_new])
    return img,[x1_new,y1_new,x2_new,y2_new]
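
# Alternative sketch (my addition, not part of the original pipeline): rotating only the box
# centre keeps the box size fixed, which under-covers the object after large rotations. A
# common alternative is to rotate all four corners and take their axis-aligned hull:
def rotate_bbox_corners(label, angle_deg, cx, cy, w, h):
    x1, y1, x2, y2 = label
    theta = angle_deg * np.pi / 180.0
    xs, ys = [], []
    for px, py in [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]:
        dx, dy = px - cx, py - cy
        xs.append(dx * math.cos(theta) + dy * math.sin(theta) + cx)
        ys.append(-dx * math.sin(theta) + dy * math.cos(theta) + cy)
    # clamp to the image and return the axis-aligned hull of the rotated corners
    return [max(min(min(xs), w), 0), max(min(min(ys), h), 0),
            min(max(max(xs), 0), w), min(max(max(ys), 0), h)]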

# Image augmentation entry point (currently disabled: it returns the input unchanged)
def image_plus(img,label):
    return img,label
    # idx = np.random.randint(low=1,high=4,dtype=np.uint8)  # high is exclusive
    # if(idx==1): # random scale
    #     return random_scale(img,input_size+10,label)
    # if(idx==2): # random flip
    #     return random_flip(img,0.5,label)
    # if(idx==3): # random rotate
    #     angle = np.random.randint(10,360)
    #     return random_rotate(img,angle,label)

# Draw text on an image
def put_on_text(img,text,pos):
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    color = (255, 0, 0)
    thickness = 2
    img = cv2.putText(img, text, pos, font, fontScale, color, thickness)

    return img


#endregion

# region Build a ResNet50 network for feature extraction
# (the ResNet code here is somewhat redundant)
# Identity block
def identity_block(X, f,filters):
    """
        实现ResNet中的恒等块

        参数:
        X - 输入的tensor类型的数据,维度为( m, n_H_prev, n_W_prev, n_C_prev)
        f - 整数,指定主路径中间的卷积层窗口的维度
        filters - 整数列表,定义了主路径中每层的卷积层的过滤器数量
        stage - 整数,用于命名层,取决于他们在网络中的位置
        block - 字符串,用于命名层,取决于他们在网络中的位置

        返回:
        X - 恒等块的输出,tensor类型,维度为(n_H, n_W, n_C)
        """
    # unpack the filter counts
    F1, F2, F3 = filters
    # keep the input for the shortcut connection
    X_shortcut = X

    # first component of the main path
    X=Convolution2D(filters=F1,kernel_size=(1,1),padding='valid',kernel_initializer=glorot_uniform(seed=0))(X)
    X=BatchNormalization(axis=3)(X)
    X=Activation('relu')(X)

    # second component of the main path
    X=Convolution2D(filters=F2,kernel_size=(f,f),padding='same',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    # third component of the main path
    X=Convolution2D(filters=F3,kernel_size=(1,1),padding='valid',kernel_initializer=glorot_uniform(seed=0))(X)
    X=BatchNormalization(axis=3)(X)
    X=Activation('relu')(X)

    # add the shortcut and apply ReLU
    X=Add()([X,X_shortcut])
    X = Activation('relu')(X)

    return X

# Bottleneck (convolutional) block
def convolutional_block(X,f,filters,s=2):
    # unpack the filter counts
    F1, F2, F3 = filters
    # keep the input for the shortcut connection
    X_shortcut = X
    # first component of the main path
    X = Convolution2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    # second component of the main path
    X = Convolution2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    # third component of the main path
    X = Convolution2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='same',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    # shortcut path
    X_shortcut = Convolution2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid',kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3)(X_shortcut)

    # add the shortcut and apply ReLU
    X=Add()([X,X_shortcut])
    X=Activation('relu')(X)

    return X
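
# Shape check (my addition): identity_block preserves spatial size and channel count (so the
# Add() is valid), while convolutional_block with s=2 halves H and W and projects the
# shortcut to F3 channels. For example:
# x = Input(shape=(56, 56, 256))
# identity_block(x, 3, [64, 64, 256]).shape        # (None, 56, 56, 256)
# convolutional_block(x, 3, [128, 128, 512]).shape # (None, 28, 28, 512)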
# Build the model

def o_check_model(num_classes=1):
    num_anchors=1
    # input tensor shaped like the input images
    X_input = Input(shape=(input_size, input_size, 3), name='input_1')
    X = ZeroPadding2D((3, 3))(X_input)

    # stage 1
    X = Convolution2D(filters=64, kernel_size=(7, 7), strides=(2, 2), kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # stage 2
    X = convolutional_block(X, f=3, filters=[64, 64, 256], s=1)
    X = identity_block(X, f=3, filters=[64, 64, 256])
    X = identity_block(X, f=3, filters=[64, 64, 256])

    # stage 3
    X = convolutional_block(X, f=3, filters=[128, 128, 512], s=2)
    X = identity_block(X, f=3, filters=[128, 128, 512])
    X = identity_block(X, f=3, filters=[128, 128, 512])
    X = identity_block(X, f=3, filters=[128, 128, 512])

    # stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], s=2)
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])

    # stage 5
    X = convolutional_block(X, f=4, filters=[512, 512, 2048], s=2)
    X = identity_block(X, f=3, filters=[512, 512, 2048])
    X = identity_block(X, f=3, filters=[512, 512, 2048])

    resNet = X
    X=AveragePooling2D((2,2))(X)
    X=Flatten()(X)
    class_head=Dense(num_classes,activation='softmax',name='class_head')(X)

    x = Conv2D(64, (3, 3), activation='relu', padding='same',name='f_f1')(resNet)
    x = BatchNormalization(axis=3)(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',name='f_f2')(x)
    x = BatchNormalization(axis=3)(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',name='f_f3')(x)
    x = BatchNormalization(axis=3)(x)
    x = Flatten()(x)
    # regression head
    reg_head = Dense(num_anchors*4,activation='linear',name='reg_head')(x)
    # assemble the full model
    model = Model(inputs=X_input, outputs={'class_head':class_head, 'reg_head':reg_head})

    # compile the model
    model.compile(optimizer='adam',
                  loss={
                        'class_head':'categorical_crossentropy',
                        'reg_head':'mean_squared_error'
                    },
                  loss_weights={
                      'class_head':1.0,
                      'reg_head':1.0
                  },
                  metrics={
                      'class_head':'accuracy',
                      'reg_head':'mae'
                  })

    # (Earlier attempts compiled with 'mse', plain mean_squared_error, total_loss,
    # total_loss_2, and [categorical_cross_entropy_loss, smooth_l1_loss]; those loss
    # functions are kept below for reference.)
    model.summary()
    return model


# Basic classification loss
def base_loss(y_true, y_pred):
    return categorical_crossentropy(y_true, y_pred)

def iou_loss(y_true,y_pred):
    # intersection
    intersection = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3])
    # union
    union = tf.reduce_sum(y_true + y_pred, axis=[1, 2, 3]) - intersection

    # IoU
    iou = (intersection + 1e-7) / (union + 1e-7)

    # turn IoU into a loss
    iou_loss = 1 - iou
    return iou_loss
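
# Note (my addition): the iou_loss above reduces over axes [1, 2, 3], which suits mask-shaped
# tensors; reg_head, however, emits (batch, 4) boxes in x,y,w,h form. A box-form IoU loss
# would first convert to corners -- a sketch, not used by the compile call above:
def box_iou_loss(y_true, y_pred):
    y_true = tf.cast(y_true, tf.float32)
    # x,y,w,h -> x1,y1,x2,y2
    t = tf.concat([y_true[..., :2] - y_true[..., 2:] / 2.0,
                   y_true[..., :2] + y_true[..., 2:] / 2.0], axis=-1)
    p = tf.concat([y_pred[..., :2] - y_pred[..., 2:] / 2.0,
                   y_pred[..., :2] + y_pred[..., 2:] / 2.0], axis=-1)
    lt = tf.maximum(t[..., :2], p[..., :2])  # intersection top-left
    rb = tf.minimum(t[..., 2:], p[..., 2:])  # intersection bottom-right
    wh = tf.maximum(rb - lt, 0.0)
    inter = wh[..., 0] * wh[..., 1]
    area_t = (t[..., 2] - t[..., 0]) * (t[..., 3] - t[..., 1])
    area_p = (p[..., 2] - p[..., 0]) * (p[..., 3] - p[..., 1])
    union = area_t + area_p - inter
    return 1.0 - (inter + 1e-7) / (union + 1e-7)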

# Combined loss
def total_loss(y_true, y_pred):
    # classification loss
    class_true = y_true[0]
    class_pred = y_pred[0]
    class_loss = base_loss(class_true, class_pred)
    # regression loss
    reg_true = y_true[1]
    reg_pred = y_pred[1]
    reg_loss = mean_squared_error(reg_true,reg_pred) # or iou_loss(reg_true,reg_pred)

    # combine the two
    total_loss = class_loss + reg_loss

    # the dtype may need adjusting later
    # total_loss = tf.cast(total_loss,dtype=tf.int32)

    return total_loss

def smooth_l1_loss(y_true, y_pred):
    diff = tf.abs(tf.cast(y_true[1],dtype=tf.float32) - y_pred[1])
    less_than_one = tf.cast(tf.less(diff, 1.0), tf.float32)
    loss = (less_than_one * 0.5 * diff ** 2) + (1.0 - less_than_one) * (diff - 0.5)
    return tf.reduce_mean(loss)

# Classification loss: cross-entropy
def categorical_cross_entropy_loss(y_true, y_pred):
    return categorical_crossentropy(y_true[0], y_pred[0])


def l2_loss(y_true, y_pred):
    return tf.reduce_sum(tf.square(tf.cast(y_true,dtype=tf.float32) -y_pred), axis=-1)

def total_loss_1(y_true,y_pred):
    # classification loss
    class_true = y_true[0]
    class_pred = y_pred[0]
    class_loss = base_loss(class_true, class_pred)
    # regression loss
    reg_true = y_true[1]
    reg_pred = y_pred[1]
    reg_loss = l2_loss(reg_true,reg_pred) # or mean_squared_error(reg_true,reg_pred)

    # combine the two
    total_loss = class_loss + reg_loss

    return total_loss


def total_loss_2(y_true,y_pred):
    # classification loss
    class_true = y_true[0]
    class_pred = y_pred[0]
    class_loss = base_loss(class_true, class_pred)

    l1=smooth_l1_loss(y_true,y_pred)

    return class_loss+l1

def custom_loss(y_true, y_pred):
    # classification loss
    class_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true[..., :20], logits=y_pred[..., :20]))

    # bounding-box loss
    bbox_loss = mean_squared_error(y_true,y_pred)

    # total loss
    total_loss = class_loss + 1 * bbox_loss

    return total_loss


# ResNet-50 backbone
# Residual blocks come in two kinds: 1) identity blocks  2) bottleneck blocks
def resnet50_model(num_classes=1):
    num_anchors=1
    # input tensor shaped like the input images
    X_input=Input(shape=(input_size,input_size,3),name='input_1')
    X=ZeroPadding2D((3,3))(X_input)

    #stage 1
    X=Convolution2D(filters=64,kernel_size=(7,7),strides=(2,2),kernel_initializer=glorot_uniform(seed=0))(X)
    X=BatchNormalization(axis=3)(X)
    X=Activation('relu')(X)
    X=MaxPooling2D((3,3),strides=(2,2))(X)

    #stage 2
    X=convolutional_block(X,f=3,filters=[64,64,256],s=1)
    X=identity_block(X,f=3,filters=[64,64,256])
    X = identity_block(X, f=3, filters=[64, 64, 256])

    # stage 3
    X=convolutional_block(X,f=3,filters=[128,128,512],s=2)
    X=identity_block(X,f=3,filters=[128,128,512])
    X = identity_block(X, f=3, filters=[128,128,512])
    X = identity_block(X, f=3, filters=[128,128,512])

    # stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], s=2)
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])
    X = identity_block(X, f=3, filters=[256, 256, 1024])

    # stage 5
    X = convolutional_block(X, f=4, filters=[512, 512, 2048], s=2)
    X = identity_block(X, f=3, filters=[512, 512, 2048])
    X = identity_block(X, f=3, filters=[512, 512, 2048])

    model = Model(inputs=X_input,outputs=X)
    return model


# endregion

from tensorflow.keras.callbacks import Callback


# Custom callback that prints y_pred after every epoch
class OutputCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        # run the model on the held-out validation images
        y_pred = self.model.predict(valid_sets[0])
        print(f'y_pred: {y_pred}')
        print(f'y_true: {valid_sets[1]}')
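
# Usage note (my addition): OutputCallback is defined but never registered; to activate it,
# pass callbacks=[OutputCallback()] to model.fit in train() below.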

valid_sets=generator_valid_data()
def train():
    model = o_check_model(len(catalogs))
    print('load model')
    model.fit(generator_data(),
              steps_per_epoch=math.ceil(5011/batch_size)+1,  # 5011 images in VOC2007 trainval
              epochs=100,
              validation_data=valid_sets)
    model.save('on_object_test.h5')


# train()
#

# # label-conversion test
# img = cv2.imread('D:\\chinese_img\\f\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\JPEGImages\\000021.jpg')
# height,width,_=img.shape
# bbox=[1,235,182,388]
# x1,y1,x2,y2=bbox
# cv2.rectangle(img, (x1,y1), (x2,y2), (0, 255, 0), 2)
#
#
# img=cv2.resize(img,(input_size,input_size))
# label = fix_label_scale(bbox,[height,width])
# # x1,y1,x2,y2=label
# # cv2.rectangle(img, (int(x1),int(y1)), (int(x2),int(y2)), (0, 255, 0), 2)
# label = convert_to_mse(label)
# # x1,y1,x2,y2=label
# # cv2.rectangle(img, (int(x1),int(y1)), (int(x2),int(y2)), (0, 255, 255), 2)
#
# label = convert_to_point(label)
#
# label = ref_label_scale(label,[height,width])
# img=cv2.resize(img,(width,height))
# x1,y1,x2,y2=label
# cv2.rectangle(img, (int(x1),int(y1)), (int(x2),int(y2)), (0, 255, 255), 2)
#
# # show the image
# cv2.imshow('image', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
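
# Guard (my addition, not in the original): tf.keras falls back to the SavedModel loader
# when the given path is not an existing HDF5 file, which produces exactly the OSError
# quoted at the top. Fail early with a clearer message if training has not run yet:
if not os.path.exists('on_object_test.h5'):
    sys.exit('on_object_test.h5 not found -- run train() first to create it')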


model = load_model('on_object_test.h5')

# D:\\chinese_img\\920.png
# D:\\chinese_img\\f\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\JPEGImages\\000007.jpg
# D:\\chinese_img\\20230507204908.jpg

print('Enter an image name (q to quit)')
user_input = sys.stdin.readline().strip()
while user_input!='q':
    pos=[]
    img=cv2.imread('E:\\VOCdevkit\\VOC2007\\JPEGImages\\'+user_input+'.jpg')
    image = cv2.resize(img, (224, 224))
    pos.append(image)
    x_train = tf.convert_to_tensor(np.array(pos))
    rsp=model.predict(x=x_train)
    # classification
    class_head = rsp['class_head'][0]
    idx = np.argmax(class_head)
    catalog = catalogs[idx]
    print('Predicted class: '+catalog)

    # localisation
    reg_head = rsp['reg_head'][0]
    # x,y,w,h = reg_head
    # cv2.rectangle(img, (int(x),int(y)), (int(w),int(h)), (0, 255, 255), 2)
    print('Raw regression output:',reg_head)
    height,width,_ =img.shape
    # convert back to x1,y1,x2,y2
    bbox = convert_to_point(reg_head)
    print('As corner coordinates:', bbox)
    bbox = ref_label_scale(bbox,[height,width])
    print('Restored to the original image scale:', bbox)
    x1, y1, x2, y2 = bbox
    # draw the predicted box
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    img = put_on_text(img,catalog+'('+str(np.max(class_head))+')',(x1, y1))
    # show the image
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()




    print('Enter an image name (q to quit)')
    user_input = sys.stdin.readline().strip()