Commit 9db35cbb authored by 蔡院强's avatar 蔡院强
Browse files

README.md

parents
Loading
Loading
Loading
Loading

GAnet_data.py

0 → 100644
+0 −0

File added.

Preview size limit exceeded, changes collapsed.

GAnet_eval_UAV.py

0 → 100644
+525 −0
Original line number Diff line number Diff line
import cv2
import time
import math
import os
import numpy as np
import tensorflow as tf
import zipfile
import locality_aware_nms as nms_locality
import lanms
from matplotlib import pyplot as plt
import imutils

# FIXME: 1. Reserve a fixed fraction of GPU memory:
tf_config = tf.ConfigProto()
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.30 # reserve 30% of GPU memory (NOTE(review): original comment said 50%, but the value is 0.30 — confirm intent)
# session = tf.Session(config=tf_config)
# FIXME: 2. Alternatively, let TensorFlow grow GPU memory on demand:
# tf_config = tf.ConfigProto()
# tf_config.gpu_options.allow_growth = True # adaptive growth
# session = tf.Session(config=tf_config)


# Video IDs of the UAV-benchmark training split (unused in this eval script,
# kept for reference).
trainID_list = ['M0101','M0201','M0202','M0204','M0206','M0207',
                'M0210','M0301','M0401','M0402','M0501','M0603',
                'M0604','M0605','M0702','M0703','M0704','M0901',
                'M0902','M1002','M1003','M1005','M1006','M1008',
                'M1102','M1201','M1202','M1304','M1305','M1306']

# Original UAV-benchmark test split, replaced below by a single folder name.
# testID_list = [ 'M0203','M0205','M0208','M0209','M0403','M0601',
#                 'M0602','M0606','M0701','M0801','M0802','M1001',
#                 'M1004','M1007','M1009','M1101','M1301','M1302',
#                 'M1303','M1401']

# Folder(s) under FLAGS.test_data_path that main_BatchEva() evaluates.
testID_list = ["Test_images"]

# Command-line flags: data location, GPU selection, checkpoint, output dirs.
tf.app.flags.DEFINE_string('test_data_path', './UAV-benchmark-M', '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 1, '')
tf.app.flags.DEFINE_string('checkpoint_path', 'GAnet_Vgg16_B10', '')
tf.app.flags.DEFINE_string('Img_output_dir', './GAnet_Vgg16_B10_B10_Res/img_B10_50001', '')
tf.app.flags.DEFINE_string('Txt_output_dir', './GAnet_Vgg16_B10_Res/txt_B10_50001', '')
tf.app.flags.DEFINE_string('ImgHeatMap_output_dir', './GAnet_Vgg16_B10_B10_Res/imgHeatMap_B10_50001', '')
tf.app.flags.DEFINE_bool('no_write_images', False, 'do not write images')

# Imported after flag definitions — presumably the model module reads flags at
# import time; verify before reordering.
import GAnet_model_UAV_Vgg16 as model
from GAnet_data import restore_rectangle


FLAGS = tf.app.flags.FLAGS


def get_images(data_path):
    '''
    Recursively collect image files under *data_path*.

    :param data_path: root directory to walk
    :return: list of full paths whose filename ends with a known image
             extension (jpg/png/jpeg/JPG)
    '''
    # str.endswith accepts a tuple of suffixes: one C-level check replaces
    # the original inner for/break loop.
    exts = ('jpg', 'png', 'jpeg', 'JPG')
    files = []
    for parent, _dirnames, filenames in os.walk(data_path):
        for filename in filenames:
            if filename.endswith(exts):
                files.append(os.path.join(parent, filename))
    print('Find {} images'.format(len(files)))
    return files


def resize_image(im, max_side_len=1024):
    '''
    Resize an image so both sides are multiples of 32 (required by the
    network) and the longer side does not exceed *max_side_len* (to avoid
    GPU out-of-memory).

    :param im: input image array of shape (H, W, C)
    :param max_side_len: upper bound on the longer image side
    :return: (resized image, (height ratio, width ratio))
    '''
    h, w, _ = im.shape

    # Shrink only when the longer side exceeds the limit.
    longer = max(h, w)
    scale = float(max_side_len) / longer if longer > max_side_len else 1.
    new_h = int(h * scale)
    new_w = int(w * scale)

    # Snap each side down to a multiple of 32. NOTE(review): the non-multiple
    # branch subtracts an extra 32 (floor minus one), mirroring the original
    # EAST evaluation code.
    new_h = new_h if new_h % 32 == 0 else (new_h // 32 - 1) * 32
    new_w = new_w if new_w % 32 == 0 else (new_w // 32 - 1) * 32
    new_h = max(32, new_h)
    new_w = max(32, new_w)

    im = cv2.resize(im, (int(new_w), int(new_h)))

    return im, (new_h / float(h), new_w / float(w))


def detect(score_map, geo_map, timer, scale = 4, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
    '''
    Restore detection boxes from the score map and geometry map.

    :param score_map: per-pixel confidence map (optionally with batch/channel dims)
    :param geo_map: per-pixel geometry map
    :param timer: dict accumulating 'restore' and 'nms' timings (seconds)
    :param scale: upsampling factor from score-map coordinates back to the
                  resized-image coordinates
    :param score_map_thresh: threshold applied to the score map
    :param box_thresh: threshold on a box's mean score inside its polygon
    :param nms_thres: IoU threshold for NMS
    :return: (boxes as an Nx9 array [8 polygon coords + score], timer)
    '''
    if len(score_map.shape) == 4:
        # strip batch and channel dims: assumes batch size 1
        score_map = score_map[0, :, :, 0]
        geo_map = geo_map[0, :, :, ]
    # keep only pixels above the score threshold
    xy_text = np.argwhere(score_map > score_map_thresh)
    # sort candidate pixels by row (y axis)
    xy_text = xy_text[np.argsort(xy_text[:, 0])]
    # restore quadrilaterals from the geometry map
    start = time.time()
    # FIX: use the 'scale' parameter instead of the hard-coded 4 — identical
    # behaviour at the default scale=4, and consistent with detect_HeatMap.
    text_box_restored = restore_rectangle(xy_text[:, ::-1] * scale, geo_map[xy_text[:, 0], xy_text[:, 1], :])  # N*4*2
    print('{} text boxes before nms'.format(text_box_restored.shape[0]))
    boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
    boxes[:, :8] = text_box_restored.reshape((-1, 8))
    boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
    timer['restore'] = time.time() - start
    # nms part
    start = time.time()
    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)  # pure-Python NMS
    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)  # C++-accelerated NMS
    timer['nms'] = time.time() - start

    if boxes.shape[0] == 0:
        return np.array([]), timer

    # Re-score each surviving box with the mean score inside its polygon
    # (used for filtering), while keeping the original NMS score in an extra
    # 10th column so it can be restored afterwards.
    boxes_10 = np.zeros((boxes.shape[0], 10), dtype=np.float32)
    boxes_10[:, :9] = boxes
    boxes_10[:, 9] = boxes[:, 8]
    for i, box in enumerate(boxes_10):
        mask = np.zeros_like(score_map, dtype=np.uint8)
        cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // scale, 1)
        boxes_10[i, 8] = cv2.mean(score_map, mask)[0]
    boxes_10 = boxes_10[boxes_10[:, 8] > box_thresh]
    boxes_10[:, 8] = boxes_10[:, 9]  # restore the original NMS score
    boxes = boxes_10[:, :9]

    return boxes, timer


def detect_HeatMap(score_map, geo_map, timer, scale = 2, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
    '''
    Restore detection boxes from the score map and geometry map, and also
    return a copy of the score map for heat-map visualization.

    :param score_map: per-pixel confidence map (optionally with batch/channel dims)
    :param geo_map: per-pixel geometry map
    :param timer: dict accumulating 'restore' and 'nms' timings (seconds)
    :param scale: based on the ratio of original image,
                  for example: 4 denotes the size of score_map needs to be
                  magnified 4 times to reach the original size.
                  # 0, 1, 2, 3  scale = 32, 16, 8, 4
    :param score_map_thresh: threshold applied to the score map
    :param box_thresh: threshold for boxes (currently unused — the final
                       filtering line below is commented out)
    :param nms_thres: IoU threshold for NMS
    :return: (boxes as an Nx9 array [8 polygon coords + score], timer, heat_map)
    '''
    # heat_scoreMap = [] # scale = 0, 1, 2, 3 with 32, 16, 8, 4
    # heat_feature = []  # scale = 0, 1, 2, 3 with 32, 16, 8, 4
    if len(score_map.shape) == 4:
        # strip batch and channel dims: assumes batch size 1
        score_map = score_map[0, :, :, 0]
        geo_map = geo_map[0, :, :, ]

    # first,copy the score_map; second, resize to range 0-255; finally, show and return.
    # In fact, the map can be shown via plt.imshow() with float parameter [0-1.0].
    heat_map = np.copy(score_map)

    # filter the score map
    xy_text = np.argwhere(score_map > score_map_thresh)
    # sort the text boxes via the y axis
    xy_text = xy_text[np.argsort(xy_text[:, 0])]
    # restore
    start = time.time()
    text_box_restored = restore_rectangle(xy_text[:, ::-1]*scale, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2
    print('{} text boxes before nms'.format(text_box_restored.shape[0]))
    boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
    boxes[:, :8] = text_box_restored.reshape((-1, 8))
    boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
    timer['restore'] = time.time() - start
    # nms part
    start = time.time()

    # FIXME: CAI note — two NMS variants are available here: the first is the
    # standard pure-Python implementation, the second is the accelerated C++ one.
    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)              # Pure python
    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)             # Python + C++
    boxes = boxes.astype(np.float64)

    timer['nms'] = time.time() - start

    if boxes.shape[0] == 0:
        return np.array([]), timer, heat_map


    # Replace each box's NMS score with the mean score inside its polygon.
    # NOTE(review): unlike detect(), the low-score filter below is disabled,
    # so box_thresh has no effect here.
    for i, box in enumerate(boxes):
        mask = np.zeros_like(score_map, dtype=np.uint8)
        cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // scale, 1)
        boxes[i, 8] = cv2.mean(score_map, mask)[0]
    # boxes = boxes[boxes[:, 8] > box_thresh]

    return boxes, timer, heat_map



def sort_poly(p):
    '''
    Canonicalize the vertex order of a 4-point polygon.

    The vertex with the smallest x+y sum (closest to the origin) becomes the
    first point; if the resulting first edge is more vertical than horizontal,
    the traversal direction is reversed so the first edge runs horizontally.

    :param p: 4x2 array of polygon vertices
    :return: 4x2 array with reordered vertices
    '''
    start = int(np.argmin(p.sum(axis=1)))
    order = [(start + k) % 4 for k in range(4)]
    p = p[order]
    dx = abs(p[0, 0] - p[1, 0])
    dy = abs(p[0, 1] - p[1, 1])
    # keep order if the first edge is mostly horizontal, else flip direction
    return p if dx > dy else p[[0, 3, 2, 1]]


def main(argv=None):
    '''
    Single-checkpoint evaluation entry point.

    Restores the latest checkpoint from FLAGS.checkpoint_path, runs detection
    on every image under FLAGS.test_data_path, writes one result txt per image
    (plus optional visualization images), and finally zips the txt results.
    '''
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list

    if not os.path.exists(FLAGS.Img_output_dir):
        os.makedirs(FLAGS.Img_output_dir)
    if not os.path.exists(FLAGS.Txt_output_dir):
        os.makedirs(FLAGS.Txt_output_dir)

    with tf.get_default_graph().as_default():
        input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        f_score, f_geometry = model.model(input_images, is_training=False)

        # restore the EMA-shadowed weights, matching the training setup
        variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
        saver = tf.train.Saver(variable_averages.variables_to_restore())

        with tf.Session(config=tf_config) as sess:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
            model_path = os.path.join(FLAGS.checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
            print('Restore from {}'.format(model_path))
            saver.restore(sess, model_path)

            # BUG FIX: get_images() requires the data path argument; the
            # original call passed nothing and raised a TypeError at runtime.
            im_fn_list = get_images(FLAGS.test_data_path)
            sum_time = 0.0
            for im_fn in im_fn_list:
                im = cv2.imread(im_fn)[:, :, ::-1]  # BGR -> RGB
                start_time = time.time()
                im_resized, (ratio_h, ratio_w) = resize_image(im)

                timer = {'net': 0, 'restore': 0, 'nms': 0}
                start = time.time()
                score, geometry = sess.run([f_score, f_geometry], feed_dict={input_images: [im_resized]})
                timer['net'] = time.time() - start

                boxes, timer = detect(score_map=score, geo_map=geometry, timer=timer)
                print('{} : net {:.0f}ms, restore {:.0f}ms, nms {:.0f}ms'.format(
                    im_fn, timer['net']*1000, timer['restore']*1000, timer['nms']*1000))

                if len(boxes) != 0:
                    scores = boxes[:, 8]
                    boxes = boxes[:, :8].reshape((-1, 4, 2))
                    # map coordinates back to the original image size
                    boxes[:, :, 0] /= ratio_w
                    boxes[:, :, 1] /= ratio_h

                duration = time.time() - start_time
                print('[timing] {}'.format(duration))
                sum_time += duration

                # Save results; an empty txt is still written when nothing is
                # found (replaces the original dead "if True:" guard).
                res_file = os.path.join(
                    FLAGS.Txt_output_dir,
                    '{}.txt'.format(
                        os.path.basename(im_fn).split('.')[0]))

                with open(res_file, 'w') as f:
                    for idx, box in enumerate(boxes):
                        # to avoid submitting errors: drop degenerate boxes
                        box = sort_poly(box.astype(np.int32))
                        if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:
                            continue
                        f.write('{},{},{},{},{},{},{},{},{}\r\n'.format(
                            box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1], scores[idx]
                        ))
                        cv2.polylines(im[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(0, 255, 0), thickness=3)
                if not FLAGS.no_write_images:
                    img_path = os.path.join(FLAGS.Img_output_dir, os.path.basename(im_fn))
                    cv2.imwrite(img_path, im[:, :, ::-1])

            print('Average second per frame is {}'.format(sum_time / len(im_fn_list)))
            print('Average frame per second is {}'.format(1.0 / (sum_time / len(im_fn_list))))
            zip_ForHRN(FLAGS.Txt_output_dir, FLAGS.Txt_output_dir + '.zip')


def main_BatchEva(argv=None):
    '''
    Batch-evaluation entry point.

    Iterates over several checkpoint steps: for each step it rewrites the
    'checkpoint' file so the TF checkpoint machinery restores that step, then
    runs detection on every test video folder in testID_list, writing one
    txt file per video in UAV-benchmark result format plus optional
    visualization images.
    '''
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list

    print("hello world")

    with tf.get_default_graph().as_default():
        input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        # NOTE(review): this call expects 4 outputs, while main() unpacks only
        # 2 from the same model.model — confirm which signature is current.
        f_score, f_geometry, F_score_KR, F_logits = model.model(input_images, is_training=False)

        # restore the EMA-shadowed weights, matching the training setup
        variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
        saver = tf.train.Saver(variable_averages.variables_to_restore())

        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # For batch evaluation: overwrite the 'checkpoint' pointer file so
            # get_checkpoint_state() resolves to each step in turn.
            pattern_str_original = "model_checkpoint_path:'model.ckpt-step'"
            point_list = [ str(e) for e in range(50001, 30000, -5000)]
            for step in point_list:
                pattern_str = pattern_str_original.replace('step', step)
                with open(os.path.join(FLAGS.checkpoint_path, 'checkpoint'), 'w') as fp:
                    fp.write(pattern_str)
                # derive per-step output dirs by replacing the trailing
                # "_<step>" suffix of the configured paths
                idx = FLAGS.Txt_output_dir.rfind("_")
                Local_txt_output_dir = FLAGS.Txt_output_dir.replace(FLAGS.Txt_output_dir[idx + 1:], step)
                Local_img_output_dir_tmp = FLAGS.Img_output_dir.replace(FLAGS.Img_output_dir[idx + 1:], step)
                Local_ImgHeatMap_output_dir_tmp = FLAGS.ImgHeatMap_output_dir.replace(FLAGS.ImgHeatMap_output_dir[idx+1:], step )

                if not os.path.exists(Local_txt_output_dir):
                    os.makedirs(Local_txt_output_dir)
                if not os.path.exists(Local_img_output_dir_tmp):
                    os.makedirs(Local_img_output_dir_tmp)
                if not os.path.exists(Local_ImgHeatMap_output_dir_tmp):
                    os.makedirs(Local_ImgHeatMap_output_dir_tmp)

                ckpt_state = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
                model_path = os.path.join(FLAGS.checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
                print('Restore from {}'.format(model_path))
                saver.restore(sess, model_path)
                sum_time = 0.0
                im_num = 0

                for video_id in testID_list:
                    video_frame_dir = os.path.join( FLAGS.test_data_path, video_id)

                    # one result file per video, one row per detection
                    fw = open(os.path.join(Local_txt_output_dir, video_id+".txt"), 'w')
                    Local_img_output_dir = os.path.join( Local_img_output_dir_tmp, video_id)
                    if not os.path.exists(Local_img_output_dir):
                        os.makedirs(Local_img_output_dir)
                    Local_ImgHeatMap_output_dir = os.path.join(Local_ImgHeatMap_output_dir_tmp, video_id)
                    if not os.path.exists(Local_ImgHeatMap_output_dir):
                        os.makedirs(Local_ImgHeatMap_output_dir)

                    im_fn_list = get_images(video_frame_dir)
                    im_num += len(im_fn_list)
                    for im_fn in im_fn_list:
                        im_name = os.path.split(im_fn)[1]
                        im_name_fg = os.path.splitext(im_name)[0].replace('img','')  # img000123.jpg==>img000123==>000123
                        im = cv2.imread(im_fn)[:, :, ::-1]
                        height, width, _ = im.shape
                        start_time = time.time()
                        im_resized, (ratio_h, ratio_w) = resize_image(im)

                        timer = {'net': 0, 'restore': 0, 'nms': 0}
                        start = time.time()

                        score, geometry, score_KR, logits = \
                            sess.run([f_score, f_geometry, F_score_KR, F_logits],
                                               feed_dict={input_images: [im_resized]})

                        timer['net'] = time.time() - start

                        # boxes, timer = detect(score_map=score, geo_map=geometry, timer=timer)
                        boxes, timer, heat_map = detect_HeatMap(score_map=score, geo_map=geometry, timer=timer)
                        print('{} : net {:.0f}ms, restore {:.0f}ms, nms {:.0f}ms'.format(
                            im_fn, timer['net']*1000, timer['restore']*1000, timer['nms']*1000))

                        # if boxes is not None:
                        if len(boxes) != 0:
                            scores = boxes[:, 8]
                            boxes = boxes[:, :8].reshape((-1, 4, 2))
                            # map coordinates back to the original image size
                            boxes[:, :, 0] /= ratio_w
                            boxes[:, :, 1] /= ratio_h

                        duration = time.time() - start_time
                        print('[timing] {}'.format(duration))
                        sum_time += duration

                        # NOTE(review): frames with no detections are skipped
                        # entirely — no txt rows and no visualization image.
                        if len(boxes) == 0:
                            continue
                        for idx, box in enumerate(boxes):
                            # to avoid submitting errors
                            box = sort_poly(box.astype(np.int32))
                            # box = check_and_validate(box)
                            if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
                                continue

                            # clamp every corner into the original image bounds
                            box[0, 0] = max(0, min(box[0, 0], width))
                            box[1, 0] = max(0, min(box[1, 0], width))
                            box[2, 0] = max(0, min(box[2, 0], width))
                            box[3, 0] = max(0, min(box[3, 0], width))
                            box[0, 1] = max(0, min(box[0, 1], height))
                            box[1, 1] = max(0, min(box[1, 1], height))
                            box[2, 1] = max(0, min(box[2, 1], height))
                            box[3, 1] = max(0, min(box[3, 1], height))
                            # axis-aligned bounding box of the quadrilateral;
                            # +0.01 presumably avoids zero-size boxes — confirm
                            x_min = min(box[:,0]) + 0.01
                            x_max = max(box[:,0]) + 0.01
                            y_min = min(box[:,1]) + 0.01
                            y_max = max(box[:, 1]) + 0.01
                            box_width = x_max - x_min
                            box_height = y_max - y_min

                            # UAV-benchmark row format, e.g.:
                            # 24,-1, 195.0,156.0,54.0,56.0, 0.021061,1,-1
                            fw.write('{},{},{},{},{},{},{},{},{}\r\n'.format(
                                int(im_name_fg), -1, x_min, y_min, box_width, box_height,scores[idx],1,-1
                            ))
                            cv2.polylines(im[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(0, 255, 0), thickness=2)
                        # FIXME: per-image visualization output
                        if not FLAGS.no_write_images:
                            img_path = os.path.join(Local_img_output_dir, os.path.basename(im_fn))
                            cv2.imwrite(img_path, im[:, :, ::-1])

                            # FIXME: heat_map overlay rendering (disabled)
                            # heat_map_path = os.path.join(Local_ImgHeatMap_output_dir,
                            #                              os.path.splitext(os.path.basename(im_fn))[0] + "_heatMap.png")
                            # fig, ax = plt.subplots()
                            # plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
                            # plt.margins(0, 0)
                            # ax.set_xticks([])
                            # ax.set_yticks([])
                            # # plt.imshow(imutils.opencv2matplotlib(source_image.copy()[:, :, ::-1]))
                            # plt.imshow(imutils.opencv2matplotlib(im[:, :, ::-1].copy()[:, :, ::-1]))
                            # plt.imshow(cv2.resize(heat_map, (width, height), interpolation=cv2.INTER_LINEAR),
                            #            cmap=plt.get_cmap('jet'), interpolation='bilinear', alpha=0.5)
                            # # plt.show()
                            # fig.savefig(heat_map_path)
                            # fig.clear()
                    fw.close()

                        # save to file (older per-image variant, kept disabled)
                        # if boxes is not None:
                        # if True:
                        #     res_file = os.path.join(
                        #         Local_txt_output_dir,
                        #         '{}.txt'.format(
                        #             os.path.basename(im_fn).split('.')[0]))
                        #
                        #     with open(res_file, 'w') as f:
                        #         # if boxes is None:
                        #         if len(boxes) == 0:
                        #             continue
                        #         for idx, box in enumerate(boxes):
                        #             # to avoid submitting errors
                        #             box = sort_poly(box.astype(np.int32))
                        #             # box = check_and_validate(box)
                        #             if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
                        #                 continue
                        #
                        #             box[0, 0] = max(0, min(box[0, 0], width))
                        #             box[1, 0] = max(0, min(box[1, 0], width))
                        #             box[2, 0] = max(0, min(box[2, 0], width))
                        #             box[3, 0] = max(0, min(box[3, 0], width))
                        #             box[0, 1] = max(0, min(box[0, 1], height))
                        #             box[1, 1] = max(0, min(box[1, 1], height))
                        #             box[2, 1] = max(0, min(box[2, 1], height))
                        #             box[3, 1] = max(0, min(box[3, 1], height))
                        #             x_min = min(box[:,0])
                        #             x_max = max(box[:,0])
                        #             y_min = min(box[:,1])
                        #             y_max = max(box[:, 1])
                        #             box_width = x_max - x_min
                        #             box_height = y_max - y_min
                        #
                        #             # 24,-1, 195.0,156.0,54.0,56.0, 0.021061,1,-1
                        #             f.write('{},{},{},{},{},{},{},{},{}\r\n'.format(
                        #                 int(im_name_fg), -1, x_min, y_min, box_width, box_height,scores[idx],1,-1
                        #             ))
                        #             cv2.polylines(im[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(0, 255, 0), thickness=2)
                        # if not FLAGS.no_write_images:
                        #     img_path = os.path.join(Local_img_output_dir, os.path.basename(im_fn))
                        #     cv2.imwrite(img_path, im[:, :, ::-1])
                        #
                        #     # Fixme: heat_map
                        #     # heat_map_path = os.path.join(Local_ImgHeatMap_output_dir,
                        #     #                              os.path.splitext(os.path.basename(im_fn))[0] + "_heatMap.png")
                        #     # fig, ax = plt.subplots()
                        #     # plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
                        #     # plt.margins(0, 0)
                        #     # ax.set_xticks([])
                        #     # ax.set_yticks([])
                        #     # # plt.imshow(imutils.opencv2matplotlib(source_image.copy()[:, :, ::-1]))
                        #     # plt.imshow(imutils.opencv2matplotlib(im[:, :, ::-1].copy()[:, :, ::-1]))
                        #     # plt.imshow(cv2.resize(heat_map, (width, height), interpolation=cv2.INTER_LINEAR),
                        #     #            cmap=plt.get_cmap('jet'), interpolation='bilinear', alpha=0.5)
                        #     # # plt.show()
                        #     # fig.savefig(heat_map_path)
                        #     # fig.clear()

                print('Average second per frame is {}'.format(sum_time/im_num))
                print('Average frame per second is {}'.format(1.0 / (sum_time / im_num)))
                # zip_ForHRN(Local_txt_output_dir, Local_txt_output_dir + '.zip')

def zip_ForHRN(startdir, file_names):
    '''
    Compress the results folder into a zip archive for evaluation upload.

    :param startdir: directory whose contents are archived (paths inside the
                     archive are relative to this directory)
    :param file_names: output zip file path
    :return: None
    '''
    # 'with' guarantees the archive is closed (and its central directory
    # written) even if an individual write raises.
    with zipfile.ZipFile(file_names, 'w', zipfile.ZIP_DEFLATED) as z:
        for dirpath, _dirnames, filenames in os.walk(startdir):
            # archive path relative to startdir; replaces the old
            # "fpath and fpath + os.sep or ''" and/or idiom
            fpath = dirpath.replace(startdir, '')
            fpath = fpath + os.sep if fpath else ''
            for filename in filenames:
                z.write(os.path.join(dirpath, filename), fpath + filename)
    print('compression', startdir)


if __name__ == '__main__':
    # tf.app.run()
    # Calls the batch-evaluation loop directly instead of going through
    # tf.app.run(); flags are still parsed lazily on first FLAGS access.
    main_BatchEva()
+0 −0

File added.

Preview size limit exceeded, changes collapsed.

GAnet_train_Vgg16.py

0 → 100644
+0 −0

File added.

Preview size limit exceeded, changes collapsed.

+113 KiB
Loading image diff...