Update utils.py (#93)

* Update utils.py

* Revert change in computation
Falak authored on 2020-02-17 12:41:10 +05:30, committed by GitHub
parent a23837c39c
commit 2eff9c00b1

@@ -10,11 +10,10 @@ import numpy as np
 import torch
 import torch.nn.functional as F
 from torchvision.ops import nms
-#import maskrcnn_benchmark.layers.nms as nms
 
-def mkdir_if_missing(d):
-    if not osp.exists(d):
-        os.makedirs(d)
+def mkdir_if_missing(dir):
+    os.makedirs(dir, exist_ok=True)
 
 def float3(x):  # format floats to 3 decimals
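
The new mkdir_if_missing relies on os.makedirs(dir, exist_ok=True), which both creates missing parents and tolerates an already-existing directory, so the separate osp.exists check is no longer needed. A minimal sketch of the new behaviour (the directory name is made up for the example):

    import os

    def mkdir_if_missing(dir):
        os.makedirs(dir, exist_ok=True)

    mkdir_if_missing('weights/tmp')  # creates weights/ and weights/tmp if absent
    mkdir_if_missing('weights/tmp')  # calling again is a no-op, no exception is raised
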
@@ -38,7 +37,10 @@ def load_classes(path):
     return list(filter(None, names))  # filter removes empty strings (such as last line)
 
-def model_info(model):  # Plots a line-by-line description of a PyTorch model
+def model_info(model):
+    """
+    Prints out a line-by-line description of a PyTorch model ending with a summary.
+    """
     n_p = sum(x.numel() for x in model.parameters())  # number parameters
     n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
     print('\n%5s %50s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
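
The docstring added here only documents existing behaviour; a hedged usage sketch (the toy model and the utils.utils import path are assumptions, adjust to this repo's layout):

    import torch.nn as nn
    from utils.utils import model_info  # import path assumed

    model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())
    model_info(model)  # prints one row per parameter tensor, then a summary line
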
@@ -50,7 +52,10 @@ def model_info(model):
 
-def plot_one_box(x, img, color=None, label=None, line_thickness=None):  # Plots one bounding box on image img
+def plot_one_box(x, img, color=None, label=None, line_thickness=None):
+    """
+    Plots one bounding box on image img.
+    """
     tl = line_thickness or round(0.0004 * max(img.shape[0:2])) + 1  # line thickness
     color = color or [random.randint(0, 255) for _ in range(3)]
     c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
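
plot_one_box is unchanged apart from the docstring; a short usage sketch of the documented signature (the blank image and the box coordinates are invented for the example, and the import path is assumed):

    import numpy as np
    from utils.utils import plot_one_box  # import path assumed

    img = np.zeros((480, 640, 3), dtype=np.uint8)            # blank BGR canvas
    plot_one_box([100, 120, 300, 360], img, label='person')  # draws the box and label on img in place
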
@@ -74,21 +79,25 @@ def weights_init_normal(m):
 
 def xyxy2xywh(x):
     # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
-    y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)
-    y[:, 0] = (x[:, 0] + x[:, 2]) / 2
-    y[:, 1] = (x[:, 1] + x[:, 3]) / 2
-    y[:, 2] = x[:, 2] - x[:, 0]
-    y[:, 3] = x[:, 3] - x[:, 1]
+    # x, y are coordinates of center
+    # (x1, y1) and (x2, y2) are coordinates of bottom left and top right respectively.
+    y = torch.zeros_like(x) if x.dtype is torch.float32 else np.zeros_like(x)
+    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
+    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
+    y[:, 2] = x[:, 2] - x[:, 0]  # width
+    y[:, 3] = x[:, 3] - x[:, 1]  # height
     return y
 
 def xywh2xyxy(x):
     # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
-    y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)
-    y[:, 0] = (x[:, 0] - x[:, 2] / 2)
-    y[:, 1] = (x[:, 1] - x[:, 3] / 2)
-    y[:, 2] = (x[:, 0] + x[:, 2] / 2)
-    y[:, 3] = (x[:, 1] + x[:, 3] / 2)
+    # x, y are coordinates of center
+    # (x1, y1) and (x2, y2) are coordinates of bottom left and top right respectively.
+    y = torch.zeros_like(x) if x.dtype is torch.float32 else np.zeros_like(x)
+    y[:, 0] = (x[:, 0] - x[:, 2] / 2)  # Bottom left x
+    y[:, 1] = (x[:, 1] - x[:, 3] / 2)  # Bottom left y
+    y[:, 2] = (x[:, 0] + x[:, 2] / 2)  # Top right x
+    y[:, 3] = (x[:, 1] + x[:, 3] / 2)  # Top right y
     return y
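
Since only zeros(x.shape) was swapped for zeros_like(x) and comments were added, a quick sanity check is that the two conversions still invert each other (the sample boxes are arbitrary, and the import path is assumed):

    import torch
    from utils.utils import xyxy2xywh, xywh2xyxy  # import path assumed

    boxes_xyxy = torch.tensor([[10., 20., 50., 80.],
                               [ 0.,  0., 30., 30.]])
    boxes_xywh = xyxy2xywh(boxes_xyxy)  # -> [[30, 50, 40, 60], [15, 15, 30, 30]]
    assert torch.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)
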
@@ -107,7 +116,7 @@ def scale_coords(img_size, coords, img0_shape):
 
 def ap_per_class(tp, conf, pred_cls, target_cls):
-    """ Compute the average precision, given the recall and precision curves.
+    """ Computes the average precision, given the recall and precision curves.
     Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
     # Arguments
         tp: True positives (list).
@@ -161,7 +170,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
 
 def compute_ap(recall, precision):
-    """ Compute the average precision, given the recall and precision curves.
+    """ Computes the average precision, given the recall and precision curves.
     Code originally from https://github.com/rbgirshick/py-faster-rcnn.
     # Arguments
         recall: The recall curve (list).
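
The body of compute_ap is not touched by this commit; for readers unfamiliar with the py-faster-rcnn reference above, the quantity it computes is the area under the monotone precision envelope, roughly as in this paraphrased sketch (ap_from_curves is a hypothetical stand-in, not the exact code in utils.py):

    import numpy as np

    def ap_from_curves(recall, precision):
        # pad the curves, force precision to be monotonically decreasing,
        # then sum precision * delta(recall) wherever recall changes
        mrec = np.concatenate(([0.0], recall, [1.0]))
        mpre = np.concatenate(([0.0], precision, [0.0]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])

    ap = ap_from_curves(np.array([0.2, 0.5, 1.0]), np.array([1.0, 0.8, 0.5]))  # = 0.69
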
@@ -542,8 +551,6 @@ def jaccard(box_a, box_b, iscrowd:bool=False):
     return out if use_batch else out.squeeze(0)
 
-
-
 def return_torch_unique_index(u, uv):
     n = uv.shape[1]  # number of columns
     first_unique = torch.zeros(n, device=u.device).long()
@@ -555,16 +562,19 @@ def return_torch_unique_index(u, uv):
 
 def strip_optimizer_from_checkpoint(filename='weights/best.pt'):
     # Strip optimizer from *.pt files for lighter files (reduced by 2/3 size)
     a = torch.load(filename, map_location='cpu')
     a['optimizer'] = []
     torch.save(a, filename.replace('.pt', '_lite.pt'))
 
 def plot_results():
-    # Plot YOLO training results file 'results.txt'
-    # import os; os.system('wget https://storage.googleapis.com/ultralytics/yolov3/results_v1.txt')
+    """
+    Plot YOLO training results from the file 'results.txt'
+    Example of what this is trying to plot can be found at:
+    https://user-images.githubusercontent.com/26833433/63258271-fe9d5300-c27b-11e9-9a15-95038daf4438.png
+    An example results.txt file:
+    import os; os.system('wget https://storage.googleapis.com/ultralytics/yolov3/results_v1.txt')
+    """
     plt.figure(figsize=(14, 7))
     s = ['X + Y', 'Width + Height', 'Confidence', 'Classification', 'Total Loss', 'mAP', 'Recall', 'Precision']
     files = sorted(glob.glob('results*.txt'))
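
To reproduce the plot referenced in the new docstring, the docstring itself shows how to fetch a sample results file; since the function globs for results*.txt in the working directory, something along these lines should suffice (the import path is assumed):

    import os
    from utils.utils import plot_results  # import path assumed

    os.system('wget https://storage.googleapis.com/ultralytics/yolov3/results_v1.txt')
    plot_results()  # picks up every results*.txt found and plots the eight panels listed in s
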