wudiao / stacked_hourglasses · Commits
Commit 322601f1 authored Oct 09, 2021 by baihe
updated
parent 86b64e4e
Showing 3 changed files with 0 additions and 323 deletions (+0 -323)

DetectorLoader.py                                   +0 -117
mymodels/yolo-tiny-onecls/best-model.pth            +0 -0
mymodels/yolo-tiny-onecls/yolov3-tiny-onecls.cfg    +0 -206
DetectorLoader.py  deleted 100644 → 0
import time
import torch
import numpy as np
import torchvision.transforms as transforms

from queue import Queue
from threading import Thread

from Detection.Models import Darknet
from Detection.Utils import non_max_suppression, ResizePadding


class TinyYOLOv3_onecls(object):
    """Load a trained Tiny-YOLOv3 one-class (person) detection model.

    Args:
        input_size: (int) Size of the input image; must be divisible by 32. Default: 416.
        config_file: (str) Path to the YOLO model structure config file.
        weight_file: (str) Path to the trained weights file.
        nms: (float) Non-Maximum Suppression overlap threshold.
        conf_thres: (float) Minimum confidence threshold below which predicted bboxes are cut off.
        device: (str) Device to load the model on: 'cpu' or 'cuda'.
    """
    def __init__(self,
                 input_size=416,
                 config_file='mymodels/yolo-tiny-onecls/yolov3-tiny-onecls.cfg',
                 weight_file='mymodels/yolo-tiny-onecls/best-model.pth',
                 nms=0.2,
                 conf_thres=0.45,
                 device='cuda'):
        self.input_size = input_size
        self.model = Darknet(config_file).to(device)
        self.model.load_state_dict(torch.load(weight_file, map_location=torch.device('cpu')))
        self.model.eval()
        self.device = device

        self.nms = nms
        self.conf_thres = conf_thres

        self.resize_fn = ResizePadding(input_size, input_size)
        self.transf_fn = transforms.ToTensor()

    def detect(self, image, need_resize=True, expand_bb=5):
        """Feed forward through the model.

        Args:
            image: (numpy array) Single RGB image to detect.
            need_resize: (bool) If True, resize to input_size before the forward
                pass and scale the returned bboxes back to the original image size.
            expand_bb: (int) Number of pixels by which to expand each box boundary.
        Returns:
            (torch.float32) One row per detected object:
                [top, left, bottom, right, bbox_score, class_score, class].
            Returns `None` if nothing is detected.
        """
        image_size = (self.input_size, self.input_size)
        if need_resize:
            image_size = image.shape[:2]
            image = self.resize_fn(image)

        image = self.transf_fn(image)[None, ...]
        # Scale factor applied by the letterbox resize (at most 1).
        scf = torch.min(self.input_size / torch.FloatTensor([image_size]), 1)[0]

        detected = self.model(image.to(self.device))
        detected = non_max_suppression(detected, self.conf_thres, self.nms)[0]
        if detected is not None:
            # Undo the letterbox padding and scaling so boxes refer to the
            # original image coordinates.
            detected[:, [0, 2]] -= (self.input_size - scf * image_size[1]) / 2
            detected[:, [1, 3]] -= (self.input_size - scf * image_size[0]) / 2
            detected[:, 0:4] /= scf

            # Expand each box by expand_bb pixels, clamped to the image bounds.
            detected[:, 0:2] = np.maximum(0, detected[:, 0:2] - expand_bb)
            detected[:, 2:4] = np.minimum(image_size[::-1], detected[:, 2:4] + expand_bb)

        return detected


class ThreadDetection(object):
    """Run the detector in a background thread, buffering (image, output) pairs."""
    def __init__(self, dataloader, model, queue_size=256):
        self.model = model
        self.dataloader = dataloader
        self.stopped = False
        self.Q = Queue(maxsize=queue_size)

    def start(self):
        Thread(target=self.update, args=(), daemon=True).start()
        return self

    def update(self):
        while True:
            if self.stopped:
                return

            images = self.dataloader.getitem()
            outputs = self.model.detect(images)

            if self.Q.full():
                time.sleep(2)

            self.Q.put((images, outputs))

    def getitem(self):
        return self.Q.get()

    def stop(self):
        self.stopped = True

    def __len__(self):
        return self.Q.qsize()
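
For context on how the two classes fit together, a minimal usage sketch follows. It is not part of the commit: it assumes the script runs from the repository root (so the Detection package and the default model paths resolve) and that OpenCV is installed; 'frame.jpg' and `loader` are hypothetical placeholders.

# Minimal usage sketch (assumptions as noted above; not repository code).
import cv2  # OpenCV, assumed installed

detector = TinyYOLOv3_onecls(device='cpu')  # 'cuda' if a GPU is available

# The model expects a single RGB image; OpenCV loads BGR, so convert.
frame = cv2.cvtColor(cv2.imread('frame.jpg'), cv2.COLOR_BGR2RGB)
bboxes = detector.detect(frame, need_resize=True, expand_bb=5)
if bboxes is not None:
    # One row per detection, laid out as described in the detect() docstring.
    print(bboxes.shape)

# Background-threaded variant: `loader` is any object exposing getitem()
# that returns one RGB frame per call, which is all ThreadDetection needs.
# thread_det = ThreadDetection(loader, detector).start()
# frame, outputs = thread_det.getitem()
# thread_det.stop()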
mymodels/yolo-tiny-onecls/best-model.pth  deleted 100644 → 0
File deleted
mymodels/yolo-tiny-onecls/yolov3-tiny-onecls.cfg  deleted 100644 → 0
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky
# 1
[maxpool]
size=2
stride=2
# 2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# 3
[maxpool]
size=2
stride=2
# 4
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
# 5
[maxpool]
size=2
stride=2
# 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
# 7
[maxpool]
size=2
stride=2
# 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 9
[maxpool]
size=2
stride=2
# 10
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 11
[maxpool]
size=2
stride=1
# 12
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
###########
# 13
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
# 14
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
# 15
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
# 16
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=1
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
# 17
[route]
layers = -4
# 18
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
# 19
[upsample]
stride=2
# 20
[route]
layers = -1, 8
# 21
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
# 22
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
# 23
[yolo]
mask = 1,2,3
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=1
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
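
One detail of the config worth spelling out: both detection-output convolutions use filters=18. Each [yolo] head keeps 3 of the 6 anchors via its mask, and with classes=1 each anchor predicts 4 box offsets, 1 objectness score, and 1 class score, so the preceding 1x1 convolution needs 3 × (4 + 1 + 1) = 18 output channels. A small arithmetic sketch (standard YOLOv3 head layout, not code from this repository):

# Sanity-check the filters=18 value in the [convolutional] layers feeding
# each [yolo] head (standard YOLOv3 head arithmetic, for illustration only).
anchors_per_scale = 3          # length of each mask, e.g. mask = 3,4,5
num_classes = 1                # classes=1 (person)
filters = anchors_per_scale * (4 + 1 + num_classes)  # box(4) + objectness(1) + classes
assert filters == 18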