4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -24,7 +24,7 @@ repos:
- id: check-docstring-first

- repo: https://github.com/asottile/pyupgrade
-rev: v2.31.0
+rev: v2.31.1
hooks:
- id: pyupgrade
args: [--py36-plus]
@@ -37,7 +37,7 @@ repos:
name: Sort imports

- repo: https://github.com/pre-commit/mirrors-yapf
-rev: v0.31.0
+rev: v0.32.0
hooks:
- id: yapf
name: YAPF formatting
5 changes: 5 additions & 0 deletions models/tf.py
@@ -50,6 +50,7 @@ def call(self, inputs):


class TFPad(keras.layers.Layer):
+
def __init__(self, pad):
super().__init__()
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
@@ -206,6 +207,7 @@ def call(self, inputs):


class TFDetect(keras.layers.Layer):
+# TF YOLOv5 Detect layer
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
super().__init__()
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
@@ -255,6 +257,7 @@ def _make_grid(nx=20, ny=20):


class TFUpsample(keras.layers.Layer):
+# TF version of torch.nn.Upsample()
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
super().__init__()
assert scale_factor == 2, "scale_factor must be 2"
@@ -269,6 +272,7 @@ def call(self, inputs):


class TFConcat(keras.layers.Layer):
+# TF version of torch.concat()
def __init__(self, dimension=1, w=None):
super().__init__()
assert dimension == 1, "convert only NCHW to NHWC concat"
@@ -331,6 +335,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)


class TFModel:
+# TF YOLOv5 model
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
super().__init__()
if isinstance(cfg, dict):
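Note on the models/tf.py classes touched above: they re-implement PyTorch (NCHW) layers as Keras (NHWC) layers, which is why TFPad stores its padding as a rank-4 constant. A minimal, self-contained sketch of that idea (not the repo's code; names are illustrative):

```python
# Why TFPad keeps a [[0, 0], [pad, pad], [pad, pad], [0, 0]] constant: in NHWC the
# two middle axes are height and width, so only they receive symmetric zero padding.
import tensorflow as tf


def nhwc_pad(x, pad=1):
    """Zero-pad height and width of an NHWC tensor by `pad` pixels per side."""
    paddings = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
    return tf.pad(x, paddings)


x = tf.zeros((1, 640, 640, 3))       # batch, height, width, channels
print(nhwc_pad(x, pad=1).shape)      # (1, 642, 642, 3)
```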
1 change: 1 addition & 0 deletions models/yolo.py
@@ -88,6 +88,7 @@ def _make_grid(self, nx=20, ny=20, i=0):


class Model(nn.Module):
+# YOLOv5 model
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super().__init__()
if isinstance(cfg, dict):
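As the hunk above shows, Model accepts either an already-parsed dict or a *.yaml path for cfg. A small hedged example of preparing such a dict (the file name is the stock config shipped with the repo; the listed keys are the usual ones):

```python
# Illustrative only: load a model definition YAML into the dict form that Model() also accepts.
import yaml

with open('yolov5s.yaml', errors='ignore') as f:
    cfg = yaml.safe_load(f)   # typically contains 'nc', 'depth_multiple', 'anchors', 'backbone', 'head'

print(sorted(cfg.keys()))
```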
20 changes: 12 additions & 8 deletions utils/activations.py
@@ -8,29 +8,32 @@
import torch.nn.functional as F


-# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
-class SiLU(nn.Module): # export-friendly version of nn.SiLU()
+class SiLU(nn.Module):
+# SiLU activation https://arxiv.org/pdf/1606.08415.pdf
@staticmethod
def forward(x):
return x * torch.sigmoid(x)


-class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
+class Hardswish(nn.Module):
+# Hard-SiLU activation
@staticmethod
def forward(x):
# return x * F.hardsigmoid(x) # for TorchScript and CoreML
return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX


-# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
+# Mish activation https://github.com/digantamisra98/Mish
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
+# Mish activation memory-efficient
class F(torch.autograd.Function):
+
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
@@ -47,8 +50,8 @@ def forward(self, x):
return self.F.apply(x)


-# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
+# FReLU activation https://arxiv.org/abs/2007.11824
def __init__(self, c1, k=3): # ch_in, kernel
super().__init__()
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
@@ -58,12 +61,12 @@ def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))


-# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
class AconC(nn.Module):
-r""" ACON activation (activate or not).
+r""" ACON activation (activate or not)
AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
"""
+
def __init__(self, c1):
super().__init__()
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
@@ -76,10 +79,11 @@ def forward(self, x):


class MetaAconC(nn.Module):
r""" ACON activation (activate or not).
r""" ACON activation (activate or not)
MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
"""
+
def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
super().__init__()
c2 = max(r, c1 // r)
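The comments relocated above all describe simple element-wise formulas. A quick numerical sketch (not part of the PR) shows that the export-friendly forms match PyTorch's built-ins, and how the AconC expression from the docstring reduces to SiLU for p1=1, p2=0, beta=1:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(8)

silu = x * torch.sigmoid(x)                           # SiLU / Swish
hardswish = x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0     # Hard-SiLU
mish = x * F.softplus(x).tanh()                       # Mish

print(torch.allclose(silu, nn.SiLU()(x)))             # True
print(torch.allclose(hardswish, nn.Hardswish()(x)))   # True
print(torch.allclose(mish, F.mish(x)))                # True

# AconC per the docstring: (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x;
# with p1=1, p2=0, beta=1 it reduces to plain SiLU.
p1, p2, beta = 1.0, 0.0, 1.0
acon = (p1 * x - p2 * x) * torch.sigmoid(beta * (p1 * x - p2 * x)) + p2 * x
print(torch.allclose(acon, silu))                     # True
```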
1 change: 1 addition & 0 deletions utils/callbacks.py
@@ -8,6 +8,7 @@ class Callbacks:
""""
Handles all registered callbacks for YOLOv5 Hooks
"""
+
def __init__(self):
# Define the available callbacks
self._callbacks = {
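For context on the Callbacks class whose docstring is touched above: it is a plain register-then-fire hook registry. A simplified sketch of that pattern (hook and method names here are illustrative, not the exact repo API):

```python
class MiniCallbacks:

    def __init__(self):
        # one callback list per named hook
        self._callbacks = {'on_train_start': [], 'on_fit_epoch_end': []}

    def register_action(self, hook, callback):
        """Attach a callable to a named hook."""
        assert hook in self._callbacks, f"unknown hook '{hook}'"
        self._callbacks[hook].append(callback)

    def run(self, hook, *args, **kwargs):
        """Fire every callback registered for `hook`."""
        for cb in self._callbacks[hook]:
            cb(*args, **kwargs)


cbs = MiniCallbacks()
cbs.register_action('on_fit_epoch_end', lambda epoch: print(f'epoch {epoch} finished'))
cbs.run('on_fit_epoch_end', epoch=0)    # -> epoch 0 finished
```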
3 changes: 3 additions & 0 deletions utils/datasets.py
@@ -145,6 +145,7 @@ class InfiniteDataLoader(dataloader.DataLoader):

Uses same syntax as vanilla DataLoader
"""
+
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
@@ -164,6 +165,7 @@ class _RepeatSampler:
Args:
sampler (Sampler)
"""
+
def __init__(self, sampler):
self.sampler = sampler

@@ -978,6 +980,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil
autodownload: Attempt to download dataset if not found locally
verbose: Print stats dictionary
"""
+
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
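InfiniteDataLoader and _RepeatSampler above exist so one loader can be iterated across epochs without exhausting or re-spawning its workers: the sampler is wrapped in an endless generator. A simplified stand-in for that idea (it repeats a whole DataLoader rather than just its batch_sampler, so it is a sketch, not the repo's code):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset


class RepeatForever:
    """Re-yields everything from `iterable` in an endless loop."""

    def __init__(self, iterable):
        self.iterable = iterable

    def __iter__(self):
        while True:
            yield from iter(self.iterable)


dataset = TensorDataset(torch.arange(10).float())
loader = RepeatForever(DataLoader(dataset, batch_size=4))

for (batch,), _ in zip(loader, range(5)):   # 5 batches even though one epoch has only 3
    print(batch.tolist())
```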
1 change: 1 addition & 0 deletions utils/loggers/wandb/wandb_utils.py
@@ -116,6 +116,7 @@ class WandbLogger():
For more on how this logger is used, see the Weights & Biases documentation:
https://docs.wandb.com/guides/integrations/yolov5
"""
+
def __init__(self, opt, run_id=None, job_type='Training'):
"""
- Initialize WandbLogger instance
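For orientation, the logger documented above wraps the public wandb API. A minimal hedged example of the underlying calls (project and metric names are placeholders, not the repo's exact usage):

```python
import wandb

run = wandb.init(project='YOLOv5', job_type='Training', resume='allow')
for epoch in range(3):
    wandb.log({'train/box_loss': 0.05 / (epoch + 1)}, step=epoch)
wandb.finish()
```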
1 change: 1 addition & 0 deletions utils/metrics.py
@@ -260,6 +260,7 @@ def box_iou(box1, box2):
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
+
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
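The docstring above specifies box_iou's contract: boxes in (x1, y1, x2, y2) form, output an NxM IoU matrix. A self-contained sketch written from that contract (not copied from the file):

```python
import torch


def pairwise_iou(box1, box2):
    """IoU of every box in box1 (N, 4) against every box in box2 (M, 4)."""
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])   # (N,)
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])   # (M,)
    # intersection via broadcasting: (N, 1, 2) against (M, 2) -> (N, M, 2)
    lt = torch.max(box1[:, None, :2], box2[:, :2])
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])
    inter = (rb - lt).clamp(min=0).prod(dim=2)                      # (N, M)
    return inter / (area1[:, None] + area2 - inter)


a = torch.tensor([[0., 0., 10., 10.]])
b = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
print(pairwise_iou(a, b))   # tensor([[1.0000, 0.1429]])
```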
1 change: 1 addition & 0 deletions utils/torch_utils.py
@@ -284,6 +284,7 @@ class ModelEMA:
Keeps a moving average of everything in the model state_dict (parameters and buffers)
For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
"""
+
def __init__(self, model, decay=0.9999, tau=2000, updates=0):
# Create EMA
self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA
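ModelEMA, whose docstring gains a blank line above, maintains v <- d*v + (1-d)*x for every state_dict entry, where d ramps toward `decay` over the first few thousand updates (governed by `tau`). A scalar sketch of that behaviour; the exact ramp formula below is the usual exponential form and is an assumption here, since this hunk does not show it:

```python
import math


def ema_decay(updates, decay=0.9999, tau=2000):
    """Decay factor that ramps from ~0 toward `decay` as updates accumulate (assumed form)."""
    return decay * (1 - math.exp(-updates / tau))


v, x = 0.0, 1.0                      # running EMA value and a constant target to track
for updates in range(1, 5001):
    d = ema_decay(updates)
    v = d * v + (1 - d) * x          # the EMA update applied per parameter/buffer

print(round(ema_decay(1), 6), round(ema_decay(5000), 6))   # ~5e-4 early, ~0.918 later
print(round(v, 6))                                          # close to 1.0 after many updates
```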