# test.py — detector evaluation script (exported from repository @master;
# the original web-UI header lines were removed so the file parses as Python).

from loadData import DataLayer
from model import VGG16, Network
import torch
from lib.bbox_transform import bbox_transform_inv
import os
import numpy as np
from torchvision.ops import nms
from config import cfg
import cv2
from inference import _clip_boxes
from voc_eval import voc_eval

# Directory holding trained model checkpoints; ap_test() loads the last
# entry of os.listdir() from here (falls back to ImageNet weights if empty).
weight_output = "./weight_output"

def ap_test(score_thresh=0.5):
    """Run the trained detector over the test split and compute VOC metrics.

    Loads the newest checkpoint from ``weight_output`` (falling back to the
    ImageNet-pretrained VGG16 backbone when no checkpoint exists), runs
    inference on every test image on CPU, applies bounding-box regression
    and NMS, and hands the surviving detections to ``voc_eval``.

    Args:
        score_thresh: minimum per-class score a proposal must reach to be
            kept as a detection (default 0.5, the original behaviour).

    Returns:
        The ``(rec, prec, ap)`` triple produced by ``voc_eval`` for the
        "balloon" class.
    """
    num_classes = 2  # background + balloon; must match the trained head
    pred_values = []
    data_layer_test = DataLayer(os.path.join("dataset", "test"))
    net = VGG16()
    # Construct the computation graph.
    net.create_architecture(num_classes, tag='default', anchor_scales=[8, 16, 32], anchor_ratios=[0.5, 1, 2])
    net.eval()
    # Force CPU inference (the original CUDA check was commented out).
    net._device = 'cpu'
    net.to(net._device)

    # Load model weights: prefer the most recent checkpoint, otherwise the
    # pretrained backbone.  os.listdir order is OS-arbitrary, so sort to make
    # "latest checkpoint" deterministic (assumes sortable checkpoint names).
    list_dir = sorted(os.listdir(weight_output))
    if not list_dir:
        net.load_pretrained_cnn(torch.load("./imagenet_weights/vgg16.pth"))
    else:
        net.load_state_dict(torch.load(os.path.join(weight_output, list_dir[-1]), map_location='cpu'))

    print("load model params successfully. ")

    for i in range(data_layer_test.length):
        blobs = data_layer_test.forward()
        im_blob = blobs['data']
        im_scale = blobs['im_info'][2]
        # Inference only — no_grad avoids building the autograd graph.
        with torch.no_grad():
            _, scores, bbox_pred, rois = net.test_image(blobs['data'], blobs['im_info'])
        # Map RoIs back to original (un-scaled) image coordinates.
        boxes = rois[:, 1:5] / im_scale
        # Undo the input scaling so box clipping below uses the original
        # image dimensions.
        img = cv2.resize(
            im_blob[0],
            None,
            None,
            fx=1 / im_scale,
            fy=1 / im_scale,
            interpolation=cv2.INTER_LINEAR)

        # De-normalize; NOTE(review): img is only used for its shape below,
        # so this add is kept purely for parity with the training pipeline.
        img += cfg.PIXEL_MEANS

        scores = np.reshape(scores, [scores.shape[0], -1])
        bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])

        # Apply bounding-box regression deltas, then clip to the image.
        pred_boxes = bbox_transform_inv(
            torch.from_numpy(boxes), torch.from_numpy(bbox_pred)).numpy()
        pred_boxes = _clip_boxes(pred_boxes, img.shape)

        # Skip j = 0: it is the background class.
        for j in range(1, num_classes):
            inds = np.where(scores[:, j] > score_thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = pred_boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            # NMS: drop highly-overlapping boxes, keeping the best-scoring.
            keep = nms(
                torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),
                cfg.TEST.NMS).numpy() if cls_dets.size > 0 else []
            cls_dets = cls_dets[keep, :]

            height, width, _ = img.shape
            for r in range(len(cls_dets)):
                # Convert to 1-based pixel coordinates clamped to the image.
                left = float(max(cls_dets[r][0] + 1, 0))
                top = float(max(cls_dets[r][1] + 1, 0))
                right = float(min(cls_dets[r][2] + 1, width))
                bottom = float(min(cls_dets[r][3] + 1, height))

                # One record per detection: [filename, confidence, l, t, r, b].
                pred_values.append([data_layer_test.filenames[i], cls_dets[r][4], left, top, right, bottom])

    return voc_eval(pred_values, os.path.join("dataset", "test", "Annotations"), "balloon")

def _ap_to_score(ap):
    """Map an average-precision value to the grading scale."""
    if ap <= 0:
        return 0
    # Band upper bounds paired with their scores; anything above the
    # last band earns a 100.
    for upper, grade in ((0.1, 60), (0.2, 70), (0.3, 80), (0.4, 90)):
        if ap <= upper:
            return grade
    return 100

if __name__ == '__main__':
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    rec, prec, ap = ap_test()
    print(rec)
    print(prec)
    print(ap)
    score = _ap_to_score(ap)
    print("score: ", score)