
Commit 4c3a151

update files
1 parent 8cfe246 commit 4c3a151

File tree

122 files changed: +12284 additions, -228 deletions


.gitignore

Lines changed: 3 additions & 0 deletions
@@ -2,10 +2,13 @@
 __pycache__/
 build/
 *.egg-info
+.idea/
 
 
 # Files
 *.weights
+*.pth
+*.pt
 *.t7
 *.mp4
 *.avi

coco_classes.json

Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
+{
+    "0": "person",
+    "1": "bicycle",
+    "10": "fire hydrant",
+    "11": "stop sign",
+    "12": "parking meter",
+    "13": "bench",
+    "14": "bird",
+    "15": "cat",
+    "16": "dog",
+    "17": "horse",
+    "18": "sheep",
+    "19": "cow",
+    "2": "car",
+    "20": "elephant",
+    "21": "bear",
+    "22": "zebra",
+    "23": "giraffe",
+    "24": "backpack",
+    "25": "umbrella",
+    "26": "handbag",
+    "27": "tie",
+    "28": "suitcase",
+    "29": "frisbee",
+    "3": "motorcycle",
+    "30": "skis",
+    "31": "snowboard",
+    "32": "sports ball",
+    "33": "kite",
+    "34": "baseball bat",
+    "35": "baseball glove",
+    "36": "skateboard",
+    "37": "surfboard",
+    "38": "tennis racket",
+    "39": "bottle",
+    "4": "airplane",
+    "40": "wine glass",
+    "41": "cup",
+    "42": "fork",
+    "43": "knife",
+    "44": "spoon",
+    "45": "bowl",
+    "46": "banana",
+    "47": "apple",
+    "48": "sandwich",
+    "49": "orange",
+    "5": "bus",
+    "50": "broccoli",
+    "51": "carrot",
+    "52": "hot dog",
+    "53": "pizza",
+    "54": "donut",
+    "55": "cake",
+    "56": "chair",
+    "57": "couch",
+    "58": "potted plant",
+    "59": "bed",
+    "6": "train",
+    "60": "dining table",
+    "61": "toilet",
+    "62": "tv",
+    "63": "laptop",
+    "64": "mouse",
+    "65": "remote",
+    "66": "keyboard",
+    "67": "cell phone",
+    "68": "microwave",
+    "69": "oven",
+    "7": "truck",
+    "70": "toaster",
+    "71": "sink",
+    "72": "refrigerator",
+    "73": "book",
+    "74": "clock",
+    "75": "vase",
+    "76": "scissors",
+    "77": "teddy bear",
+    "78": "hair drier",
+    "79": "toothbrush",
+    "8": "boat",
+    "9": "traffic light"
+}
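Note: the new coco_classes.json maps COCO class indices to names, with the indices stored as string keys. A minimal sketch of how the file could be consumed; the detection tuples below are made up for illustration and are not this repo's detector output format:

import json

# Keys are strings ("0", "1", ...), so integer class ids from a detector
# must be converted before lookup.
with open("coco_classes.json") as f:
    coco_classes = json.load(f)

# Hypothetical detections: (x1, y1, x2, y2, score, class_id)
detections = [(10, 20, 110, 220, 0.91, 0), (50, 60, 90, 120, 0.78, 2)]
for *xyxy, score, cls_id in detections:
    print(coco_classes[str(int(cls_id))], round(score, 2), xyxy)  # person, car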

configs/yolov5l.yaml

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5l.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5l.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100

configs/yolov5m.yaml

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5m.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5m.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100

configs/yolov5n.yaml

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5n.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5n.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100

configs/yolov5s.yaml

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5s.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5s.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100

configs/yolov5x.yaml

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+YOLOV5:
+  CFG: "./detector/YOLOv5/models/yolov5x.yaml"
+  WEIGHT: "./detector/YOLOv5/yolov5x.pt"
+  DATA: './detector/YOLOv5/data/coco128.yaml'
+
+  IMGSZ: [640, 640]
+  SCORE_THRESH: 0.25
+  NMS_THRESH: 0.45
+  MAX_DET: 100
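All five config files follow the same schema: CFG (model definition), WEIGHT (checkpoint), DATA (dataset YAML), plus the inference parameters IMGSZ, SCORE_THRESH, NMS_THRESH and MAX_DET. The commit does not show the loader that consumes them, so here is a minimal sketch using PyYAML; the repo may use its own config utility instead:

import yaml  # PyYAML, assumed here for illustration

with open("configs/yolov5s.yaml") as f:
    cfg = yaml.safe_load(f)["YOLOV5"]

print(cfg["WEIGHT"])        # ./detector/YOLOv5/yolov5s.pt
print(cfg["IMGSZ"])         # [640, 640] input size
print(cfg["SCORE_THRESH"])  # 0.25 confidence threshold
print(cfg["NMS_THRESH"])    # 0.45 IoU threshold for NMS
print(cfg["MAX_DET"])       # 100 max detections per image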

deep_sort/__init__.py

Lines changed: 10 additions & 19 deletions
@@ -1,28 +1,19 @@
 from .deep_sort import DeepSort
 
-
 __all__ = ['DeepSort', 'build_tracker']
 
 
 def build_tracker(cfg, use_cuda):
     if cfg.USE_FASTREID:
-        return DeepSort(model_path=cfg.FASTREID.CHECKPOINT, model_config=cfg.FASTREID.CFG,
-                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
-                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
-                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=use_cuda)
+        return DeepSort(model_path=cfg.FASTREID.CHECKPOINT, model_config=cfg.FASTREID.CFG,
+                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
+                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
+                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
+                        use_cuda=use_cuda)
 
     else:
-        return DeepSort(model_path=cfg.DEEPSORT.REID_CKPT,
-                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
-                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
-                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=use_cuda)
-
-
-
-
-
-
-
-
-
-
+        return DeepSort(model_path=cfg.DEEPSORT.REID_CKPT,
+                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
+                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
+                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
+                        use_cuda=use_cuda)
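build_tracker only reads the attributes it dereferences: USE_FASTREID, the DEEPSORT block and, when FastReID is enabled, FASTREID.CHECKPOINT / FASTREID.CFG. A minimal usage sketch; the EasyDict wrapper and the parameter values are assumptions for illustration, not values taken from this commit:

from easydict import EasyDict  # assumption: any attribute-style config object works

from deep_sort import build_tracker

cfg = EasyDict({
    "USE_FASTREID": False,
    "DEEPSORT": {
        "REID_CKPT": "checkpoint/ckpt.t7",  # illustrative path
        "MAX_DIST": 0.2,
        "MIN_CONFIDENCE": 0.3,
        "NMS_MAX_OVERLAP": 0.5,
        "MAX_IOU_DISTANCE": 0.7,
        "MAX_AGE": 70,
        "N_INIT": 3,
        "NN_BUDGET": 100,
    },
})

tracker = build_tracker(cfg, use_cuda=True)
# The tracker is then updated once per frame with detections and the image;
# see deep_sort/deep_sort.py for the exact update() signature.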

deep_sort/deep/feature_extractor.py

Lines changed: 14 additions & 15 deletions
@@ -5,16 +5,20 @@
 import logging
 
 from .model import Net
-from fastreid.config import get_cfg
-from fastreid.engine import DefaultTrainer
-from fastreid.utils.checkpoint import Checkpointer
+from .resnet import resnet18
+
+
+# from fastreid.config import get_cfg
+# from fastreid.engine import DefaultTrainer
+# from fastreid.utils.checkpoint import Checkpointer
 
 class Extractor(object):
     def __init__(self, model_path, use_cuda=True):
         self.net = Net(reid=True)
+        # self.net = resnet18(reid=True)
         self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
-        state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']
-        self.net.load_state_dict(state_dict)
+        state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
+        self.net.load_state_dict(state_dict if 'net_dict' not in state_dict else state_dict['net_dict'], strict=False)
         logger = logging.getLogger("root.tracker")
         logger.info("Loading weights from {}... Done!".format(model_path))
         self.net.to(self.device)

@@ -23,8 +27,6 @@ def __init__(self, model_path, use_cuda=True):
             transforms.ToTensor(),
             transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
         ])
-
-
 
     def _preprocess(self, im_crops):
         """

@@ -35,20 +37,21 @@ def _preprocess(self, im_crops):
             3. to torch Tensor
             4. normalize
         """
+
         def _resize(im, size):
-            return cv2.resize(im.astype(np.float32)/255., size)
+            return cv2.resize(im.astype(np.float32) / 255., size)
 
         im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
         return im_batch
 
-
     def __call__(self, im_crops):
         im_batch = self._preprocess(im_crops)
         with torch.no_grad():
             im_batch = im_batch.to(self.device)
             features = self.net(im_batch)
         return features.cpu().numpy()
 
+
 class FastReIDExtractor(object):
     def __init__(self, model_config, model_path, use_cuda=True):
         cfg = get_cfg()

@@ -69,15 +72,13 @@ def __init__(self, model_config, model_path, use_cuda=True):
             transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
         ])
 
-
    def _preprocess(self, im_crops):
        def _resize(im, size):
-            return cv2.resize(im.astype(np.float32)/255., size)
+            return cv2.resize(im.astype(np.float32) / 255., size)
 
        im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
        return im_batch
 
-
    def __call__(self, im_crops):
        im_batch = self._preprocess(im_crops)
        with torch.no_grad():

@@ -86,10 +87,8 @@ def __call__(self, im_crops):
         return features.cpu().numpy()
 
 
-
 if __name__ == '__main__':
-    img = cv2.imread("demo.jpg")[:,:,(2,1,0)]
+    img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]
     extr = Extractor("checkpoint/ckpt.t7")
     feature = extr(img)
     print(feature.shape)
-
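The substantive change in Extractor.__init__ is the checkpoint handling: the old code required a dict wrapped under a 'net_dict' key, while the new code also accepts a bare state_dict and loads with strict=False, so checkpoints with extra or missing keys (e.g. a different classifier head) still load. The same logic in isolation, with a throwaway model standing in for Net:

import torch
import torch.nn as nn

def load_reid_weights(model, model_path, device="cpu"):
    ckpt = torch.load(model_path, map_location=device)
    # Accept both layouts: {'net_dict': state_dict} and a bare state_dict.
    state_dict = ckpt['net_dict'] if 'net_dict' in ckpt else ckpt
    model.load_state_dict(state_dict, strict=False)
    return model

# Round-trip demo with both checkpoint layouts.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
torch.save(model.state_dict(), "/tmp/bare.pth")
torch.save({"net_dict": model.state_dict()}, "/tmp/wrapped.pth")
load_reid_weights(model, "/tmp/bare.pth")
load_reid_weights(model, "/tmp/wrapped.pth")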

deep_sort/deep/model.py

Lines changed: 25 additions & 24 deletions
@@ -2,17 +2,18 @@
 import torch.nn as nn
 import torch.nn.functional as F
 
+
 class BasicBlock(nn.Module):
-    def __init__(self, c_in, c_out,is_downsample=False):
-        super(BasicBlock,self).__init__()
+    def __init__(self, c_in, c_out, is_downsample=False):
+        super(BasicBlock, self).__init__()
         self.is_downsample = is_downsample
         if is_downsample:
             self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=2, padding=1, bias=False)
         else:
             self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=1, padding=1, bias=False)
         self.bn1 = nn.BatchNorm2d(c_out)
         self.relu = nn.ReLU(True)
-        self.conv2 = nn.Conv2d(c_out,c_out,3,stride=1,padding=1, bias=False)
+        self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1, padding=1, bias=False)
         self.bn2 = nn.BatchNorm2d(c_out)
         if is_downsample:
             self.downsample = nn.Sequential(

@@ -26,48 +27,50 @@ def __init__(self, c_in, c_out,is_downsample=False):
             )
             self.is_downsample = True
 
-    def forward(self,x):
+    def forward(self, x):
         y = self.conv1(x)
         y = self.bn1(y)
         y = self.relu(y)
         y = self.conv2(y)
         y = self.bn2(y)
         if self.is_downsample:
             x = self.downsample(x)
-        return F.relu(x.add(y),True)
+        return F.relu(x.add(y), True)
+
 
-def make_layers(c_in,c_out,repeat_times, is_downsample=False):
+def make_layers(c_in, c_out, repeat_times, is_downsample=False):
     blocks = []
     for i in range(repeat_times):
-        if i ==0:
-            blocks += [BasicBlock(c_in,c_out, is_downsample=is_downsample),]
+        if i == 0:
+            blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample), ]
         else:
-            blocks += [BasicBlock(c_out,c_out),]
+            blocks += [BasicBlock(c_out, c_out), ]
     return nn.Sequential(*blocks)
 
+
 class Net(nn.Module):
-    def __init__(self, num_classes=751 ,reid=False):
-        super(Net,self).__init__()
+    def __init__(self, num_classes=751, reid=False):
+        super(Net, self).__init__()
         # 3 128 64
         self.conv = nn.Sequential(
-            nn.Conv2d(3,64,3,stride=1,padding=1),
+            nn.Conv2d(3, 64, 3, stride=1, padding=1),
             nn.BatchNorm2d(64),
             nn.ReLU(inplace=True),
             # nn.Conv2d(32,32,3,stride=1,padding=1),
             # nn.BatchNorm2d(32),
             # nn.ReLU(inplace=True),
-            nn.MaxPool2d(3,2,padding=1),
+            nn.MaxPool2d(3, 2, padding=1),
         )
         # 32 64 32
-        self.layer1 = make_layers(64,64,2,False)
+        self.layer1 = make_layers(64, 64, 2, False)
         # 32 64 32
-        self.layer2 = make_layers(64,128,2,True)
+        self.layer2 = make_layers(64, 128, 2, True)
         # 64 32 16
-        self.layer3 = make_layers(128,256,2,True)
+        self.layer3 = make_layers(128, 256, 2, True)
         # 128 16 8
-        self.layer4 = make_layers(256,512,2,True)
+        self.layer4 = make_layers(256, 512, 2, True)
         # 256 8 4
-        self.avgpool = nn.AvgPool2d((8,4),1)
+        self.avgpool = nn.AdaptiveAvgPool2d(1)
         # 256 1 1
         self.reid = reid
         self.classifier = nn.Sequential(

@@ -77,18 +80,18 @@ def __init__(self, num_classes=751 ,reid=False):
             nn.Dropout(),
             nn.Linear(256, num_classes),
         )
-
+
     def forward(self, x):
         x = self.conv(x)
         x = self.layer1(x)
         x = self.layer2(x)
         x = self.layer3(x)
         x = self.layer4(x)
         x = self.avgpool(x)
-        x = x.view(x.size(0),-1)
+        x = x.view(x.size(0), -1)
         # B x 128
         if self.reid:
-            x = x.div(x.norm(p=2,dim=1,keepdim=True))
+            x = x.div(x.norm(p=2, dim=1, keepdim=True))
         return x
         # classifier
         x = self.classifier(x)

@@ -97,8 +100,6 @@ def forward(self, x):
 
 if __name__ == '__main__':
     net = Net()
-    x = torch.randn(4,3,128,64)
+    x = torch.randn(4, 3, 128, 64)
     y = net(x)
-    import ipdb; ipdb.set_trace()
-
 
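Beyond PEP 8 spacing, the functional change here is self.avgpool: nn.AvgPool2d((8, 4), 1) assumed the 128x64 ReID crop that yields an 8x4 feature map after the stride-2 stages, whereas nn.AdaptiveAvgPool2d(1) always pools to 1x1, so the 512-d output no longer depends on the crop being exactly 128x64. A quick check, assuming the module is importable as deep_sort.deep.model:

import torch
from deep_sort.deep.model import Net

net = Net(reid=True).eval()
with torch.no_grad():
    for h, w in [(128, 64), (256, 128)]:
        feats = net(torch.randn(2, 3, h, w))
        # Both sizes now yield a [2, 512] L2-normalised feature batch;
        # the fixed AvgPool2d((8, 4)) only matched 128x64 inputs.
        print((h, w), feats.shape)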
