ultralytics 8.0.41
TF SavedModel and EdgeTPU export (#1034)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Noobtoss <96134731+Noobtoss@users.noreply.github.com>
Co-authored-by: Ayush Chaurasia <ayush.chaurarsia@gmail.com>
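Note: a minimal usage sketch of the two export paths this commit touches, assuming the `ultralytics` Python API shown in the exporter docstring below; the format keys and output names mirror that table and are illustrative, not part of the diff.

    # Hedged sketch: export a pretrained model to TF SavedModel and Edge TPU.
    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')
    model.export(format='saved_model')  # -> yolov8n_saved_model/ directory
    model.export(format='edgetpu')      # -> yolov8n_edgetpu.tflite (int8 in/out)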
@ -256,7 +256,7 @@ def entrypoint(debug=''):

    # Defaults
    task2model = dict(detect='yolov8n.pt', segment='yolov8n-seg.pt', classify='yolov8n-cls.pt')
-    task2data = dict(detect='coco128.yaml', segment='coco128-seg.yaml', classify='mnist160')
+    task2data = dict(detect='coco128.yaml', segment='coco128-seg.yaml', classify='imagenet100')

    # Mode
    mode = overrides.get('mode', None)
@ -583,9 +583,10 @@ class Albumentations:
        # TODO: add supports of segments and keypoints
        if self.transform and random.random() < self.p:
            new = self.transform(image=im, bboxes=bboxes, class_labels=cls)  # transformed
-            labels['img'] = new['image']
-            labels['cls'] = np.array(new['class_labels'])
-            bboxes = np.array(new['bboxes'])
+            if len(new['class_labels']) > 0:  # skip update if no bbox in new im
+                labels['img'] = new['image']
+                labels['cls'] = np.array(new['class_labels'])
+                bboxes = np.array(new['bboxes'])
        labels['instances'].update(bboxes=bboxes)
        return labels
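Note: the Albumentations hunk above guards against the case where the augmentation drops every box; previously the empty results were written back unconditionally, leaving zero-length `cls`/`bboxes` arrays. A minimal illustration of the guard (hypothetical values, not from the diff):

    import numpy as np

    new = {'class_labels': [], 'bboxes': []}  # hypothetical: augmentation dropped all boxes
    if len(new['class_labels']) > 0:  # same guard as above: skip update if no bbox survives
        cls = np.array(new['class_labels'])
        bboxes = np.array(new['bboxes'])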
@ -1,73 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
# Example usage: python train.py --data Argoverse.yaml
# parent
# ├── yolov5
# └── datasets
#     └── Argoverse  ← downloads here (31.3 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Argoverse  # dataset root dir
train: Argoverse-1.1/images/train/  # train images (relative to 'path') 39384 images
val: Argoverse-1.1/images/val/  # val images (relative to 'path') 15062 images
test: Argoverse-1.1/images/test/  # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: bus
  5: truck
  6: traffic_light
  7: stop_sign


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import json
  from tqdm import tqdm
  from ultralytics.yolo.utils.downloads import download
  from pathlib import Path

  def argoverse2yolo(set):
      labels = {}
      a = json.load(open(set, "rb"))
      for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
          img_id = annot['image_id']
          img_name = a['images'][img_id]['name']
          img_label_name = f'{img_name[:-3]}txt'

          cls = annot['category_id']  # instance class id
          x_center, y_center, width, height = annot['bbox']
          x_center = (x_center + width / 2) / 1920.0  # offset and scale
          y_center = (y_center + height / 2) / 1200.0  # offset and scale
          width /= 1920.0  # scale
          height /= 1200.0  # scale

          img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
          if not img_dir.exists():
              img_dir.mkdir(parents=True, exist_ok=True)

          k = str(img_dir / img_label_name)
          if k not in labels:
              labels[k] = []
          labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")

      for k in labels:
          with open(k, "w") as f:
              f.writelines(labels[k])


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
  download(urls, dir=dir)

  # Convert
  annotations_dir = 'Argoverse-HD/annotations/'
  (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images')  # rename 'tracking' to 'images'
  for d in "train.json", "val.json":
      argoverse2yolo(dir / annotations_dir / d)  # convert VisDrone annotations to YOLO labels
@ -1,54 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
# Example usage: python train.py --data GlobalWheat2020.yaml
# parent
# ├── yolov5
# └── datasets
#     └── GlobalWheat2020  ← downloads here (7.0 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/GlobalWheat2020  # dataset root dir
train: # train images (relative to 'path') 3422 images
  - images/arvalis_1
  - images/arvalis_2
  - images/arvalis_3
  - images/ethz_1
  - images/rres_1
  - images/inrae_1
  - images/usask_1
val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
  - images/ethz_1
test: # test images (optional) 1276 images
  - images/utokyo_1
  - images/utokyo_2
  - images/nau_1
  - images/uq_1

# Classes
names:
  0: wheat_head


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from ultralytics.yolo.utils.downloads import download
  from pathlib import Path

  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
  download(urls, dir=dir)

  # Make Directories
  for p in 'annotations', 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)

  # Move
  for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
           'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
      (dir / p).rename(dir / 'images' / p)  # move to /images
      f = (dir / p).with_suffix('.json')  # json file
      if f.exists():
          f.rename((dir / 'annotations' / p).with_suffix('.json'))  # move to /annotations
File diff suppressed because it is too large
@ -1,443 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# Objects365 dataset https://www.objects365.org/ by Megvii
# Example usage: python train.py --data Objects365.yaml
# parent
# ├── yolov5
# └── datasets
#     └── Objects365  ← downloads here (712 GB = 367G data + 345G zips)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Objects365  # dataset root dir
train: images/train  # train images (relative to 'path') 1742289 images
val: images/val  # val images (relative to 'path') 80000 images
test:  # test images (optional)

# Classes
names:
  0: Person
  1: Sneakers
  2: Chair
  3: Other Shoes
  4: Hat
  5: Car
  6: Lamp
  7: Glasses
  8: Bottle
  9: Desk
  10: Cup
  11: Street Lights
  12: Cabinet/shelf
  13: Handbag/Satchel
  14: Bracelet
  15: Plate
  16: Picture/Frame
  17: Helmet
  18: Book
  19: Gloves
  20: Storage box
  21: Boat
  22: Leather Shoes
  23: Flower
  24: Bench
  25: Potted Plant
  26: Bowl/Basin
  27: Flag
  28: Pillow
  29: Boots
  30: Vase
  31: Microphone
  32: Necklace
  33: Ring
  34: SUV
  35: Wine Glass
  36: Belt
  37: Monitor/TV
  38: Backpack
  39: Umbrella
  40: Traffic Light
  41: Speaker
  42: Watch
  43: Tie
  44: Trash bin Can
  45: Slippers
  46: Bicycle
  47: Stool
  48: Barrel/bucket
  49: Van
  50: Couch
  51: Sandals
  52: Basket
  53: Drum
  54: Pen/Pencil
  55: Bus
  56: Wild Bird
  57: High Heels
  58: Motorcycle
  59: Guitar
  60: Carpet
  61: Cell Phone
  62: Bread
  63: Camera
  64: Canned
  65: Truck
  66: Traffic cone
  67: Cymbal
  68: Lifesaver
  69: Towel
  70: Stuffed Toy
  71: Candle
  72: Sailboat
  73: Laptop
  74: Awning
  75: Bed
  76: Faucet
  77: Tent
  78: Horse
  79: Mirror
  80: Power outlet
  81: Sink
  82: Apple
  83: Air Conditioner
  84: Knife
  85: Hockey Stick
  86: Paddle
  87: Pickup Truck
  88: Fork
  89: Traffic Sign
  90: Balloon
  91: Tripod
  92: Dog
  93: Spoon
  94: Clock
  95: Pot
  96: Cow
  97: Cake
  98: Dinning Table
  99: Sheep
  100: Hanger
  101: Blackboard/Whiteboard
  102: Napkin
  103: Other Fish
  104: Orange/Tangerine
  105: Toiletry
  106: Keyboard
  107: Tomato
  108: Lantern
  109: Machinery Vehicle
  110: Fan
  111: Green Vegetables
  112: Banana
  113: Baseball Glove
  114: Airplane
  115: Mouse
  116: Train
  117: Pumpkin
  118: Soccer
  119: Skiboard
  120: Luggage
  121: Nightstand
  122: Tea pot
  123: Telephone
  124: Trolley
  125: Head Phone
  126: Sports Car
  127: Stop Sign
  128: Dessert
  129: Scooter
  130: Stroller
  131: Crane
  132: Remote
  133: Refrigerator
  134: Oven
  135: Lemon
  136: Duck
  137: Baseball Bat
  138: Surveillance Camera
  139: Cat
  140: Jug
  141: Broccoli
  142: Piano
  143: Pizza
  144: Elephant
  145: Skateboard
  146: Surfboard
  147: Gun
  148: Skating and Skiing shoes
  149: Gas stove
  150: Donut
  151: Bow Tie
  152: Carrot
  153: Toilet
  154: Kite
  155: Strawberry
  156: Other Balls
  157: Shovel
  158: Pepper
  159: Computer Box
  160: Toilet Paper
  161: Cleaning Products
  162: Chopsticks
  163: Microwave
  164: Pigeon
  165: Baseball
  166: Cutting/chopping Board
  167: Coffee Table
  168: Side Table
  169: Scissors
  170: Marker
  171: Pie
  172: Ladder
  173: Snowboard
  174: Cookies
  175: Radiator
  176: Fire Hydrant
  177: Basketball
  178: Zebra
  179: Grape
  180: Giraffe
  181: Potato
  182: Sausage
  183: Tricycle
  184: Violin
  185: Egg
  186: Fire Extinguisher
  187: Candy
  188: Fire Truck
  189: Billiards
  190: Converter
  191: Bathtub
  192: Wheelchair
  193: Golf Club
  194: Briefcase
  195: Cucumber
  196: Cigar/Cigarette
  197: Paint Brush
  198: Pear
  199: Heavy Truck
  200: Hamburger
  201: Extractor
  202: Extension Cord
  203: Tong
  204: Tennis Racket
  205: Folder
  206: American Football
  207: earphone
  208: Mask
  209: Kettle
  210: Tennis
  211: Ship
  212: Swing
  213: Coffee Machine
  214: Slide
  215: Carriage
  216: Onion
  217: Green beans
  218: Projector
  219: Frisbee
  220: Washing Machine/Drying Machine
  221: Chicken
  222: Printer
  223: Watermelon
  224: Saxophone
  225: Tissue
  226: Toothbrush
  227: Ice cream
  228: Hot-air balloon
  229: Cello
  230: French Fries
  231: Scale
  232: Trophy
  233: Cabbage
  234: Hot dog
  235: Blender
  236: Peach
  237: Rice
  238: Wallet/Purse
  239: Volleyball
  240: Deer
  241: Goose
  242: Tape
  243: Tablet
  244: Cosmetics
  245: Trumpet
  246: Pineapple
  247: Golf Ball
  248: Ambulance
  249: Parking meter
  250: Mango
  251: Key
  252: Hurdle
  253: Fishing Rod
  254: Medal
  255: Flute
  256: Brush
  257: Penguin
  258: Megaphone
  259: Corn
  260: Lettuce
  261: Garlic
  262: Swan
  263: Helicopter
  264: Green Onion
  265: Sandwich
  266: Nuts
  267: Speed Limit Sign
  268: Induction Cooker
  269: Broom
  270: Trombone
  271: Plum
  272: Rickshaw
  273: Goldfish
  274: Kiwi fruit
  275: Router/modem
  276: Poker Card
  277: Toaster
  278: Shrimp
  279: Sushi
  280: Cheese
  281: Notepaper
  282: Cherry
  283: Pliers
  284: CD
  285: Pasta
  286: Hammer
  287: Cue
  288: Avocado
  289: Hamimelon
  290: Flask
  291: Mushroom
  292: Screwdriver
  293: Soap
  294: Recorder
  295: Bear
  296: Eggplant
  297: Board Eraser
  298: Coconut
  299: Tape Measure/Ruler
  300: Pig
  301: Showerhead
  302: Globe
  303: Chips
  304: Steak
  305: Crosswalk Sign
  306: Stapler
  307: Camel
  308: Formula 1
  309: Pomegranate
  310: Dishwasher
  311: Crab
  312: Hoverboard
  313: Meat ball
  314: Rice Cooker
  315: Tuba
  316: Calculator
  317: Papaya
  318: Antelope
  319: Parrot
  320: Seal
  321: Butterfly
  322: Dumbbell
  323: Donkey
  324: Lion
  325: Urinal
  326: Dolphin
  327: Electric Drill
  328: Hair Dryer
  329: Egg tart
  330: Jellyfish
  331: Treadmill
  332: Lighter
  333: Grapefruit
  334: Game board
  335: Mop
  336: Radish
  337: Baozi
  338: Target
  339: French
  340: Spring Rolls
  341: Monkey
  342: Rabbit
  343: Pencil Case
  344: Yak
  345: Red Cabbage
  346: Binoculars
  347: Asparagus
  348: Barbell
  349: Scallop
  350: Noddles
  351: Comb
  352: Dumpling
  353: Oyster
  354: Table Tennis paddle
  355: Cosmetics Brush/Eyeliner Pencil
  356: Chainsaw
  357: Eraser
  358: Lobster
  359: Durian
  360: Okra
  361: Lipstick
  362: Cosmetics Mirror
  363: Curling
  364: Table Tennis


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from tqdm import tqdm

  from ultralytics.yolo.utils.checks import check_requirements
  from ultralytics.yolo.utils.downloads import download
  from ultralytics.yolo.utils.ops import xyxy2xywhn

  import numpy as np
  from pathlib import Path

  check_requirements(('pycocotools>=2.0',))
  from pycocotools.coco import COCO

  # Make Directories
  dir = Path(yaml['path'])  # dataset root dir
  for p in 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)
      for q in 'train', 'val':
          (dir / p / q).mkdir(parents=True, exist_ok=True)

  # Train, Val Splits
  for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
      print(f"Processing {split} in {patches} patches ...")
      images, labels = dir / 'images' / split, dir / 'labels' / split

      # Download
      url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
      if split == 'train':
          download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir)  # annotations json
          download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, threads=8)
      elif split == 'val':
          download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir)  # annotations json
          download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, threads=8)
          download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, threads=8)

      # Move
      for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
          f.rename(images / f.name)  # move to /images/{split}

      # Labels
      coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
      names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
      for cid, cat in enumerate(names):
          catIds = coco.getCatIds(catNms=[cat])
          imgIds = coco.getImgIds(catIds=catIds)
          for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
              width, height = im["width"], im["height"]
              path = Path(im["file_name"])  # image filename
              try:
                  with open(labels / path.with_suffix('.txt').name, 'a') as file:
                      annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                      for a in coco.loadAnns(annIds):
                          x, y, w, h = a['bbox']  # bounding box in xywh (xy top-left corner)
                          xyxy = np.array([x, y, x + w, y + h])[None]  # pixels(1,4)
                          x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0]  # normalized and clipped
                          file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
              except Exception as e:
                  print(e)
@ -1,58 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
# Example usage: python train.py --data SKU-110K.yaml
# parent
# ├── yolov5
# └── datasets
#     └── SKU-110K  ← downloads here (13.6 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/SKU-110K  # dataset root dir
train: train.txt  # train images (relative to 'path') 8219 images
val: val.txt  # val images (relative to 'path') 588 images
test: test.txt  # test images (optional) 2936 images

# Classes
names:
  0: object


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import shutil
  from pathlib import Path

  import numpy as np
  import pandas as pd
  from tqdm import tqdm

  from ultralytics.yolo.utils.downloads import download
  from ultralytics.yolo.utils.ops import xyxy2xywh

  # Download
  dir = Path(yaml['path'])  # dataset root dir
  parent = Path(dir.parent)  # download dir
  urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
  download(urls, dir=parent)

  # Rename directories
  if dir.exists():
      shutil.rmtree(dir)
  (parent / 'SKU110K_fixed').rename(dir)  # rename dir
  (dir / 'labels').mkdir(parents=True, exist_ok=True)  # create labels dir

  # Convert labels
  names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height'  # column names
  for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
      x = pd.read_csv(dir / 'annotations' / d, names=names).values  # annotations
      images, unique_images = x[:, 0], np.unique(x[:, 0])
      with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
          f.writelines(f'./images/{s}\n' for s in unique_images)
      for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
          cls = 0  # single-class dataset
          with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
              for r in x[images == im]:
                  w, h = r[6], r[7]  # image width, height
                  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0]  # instance
                  f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n")  # write label
@ -1,100 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
# Example usage: python train.py --data VOC.yaml
# parent
# ├── yolov5
# └── datasets
#     └── VOC  ← downloads here (2.8 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/VOC
train: # train images (relative to 'path') 16551 images
  - images/train2012
  - images/train2007
  - images/val2012
  - images/val2007
val: # val images (relative to 'path') 4952 images
  - images/test2007
test: # test images (optional)
  - images/test2007

# Classes
names:
  0: aeroplane
  1: bicycle
  2: bird
  3: boat
  4: bottle
  5: bus
  6: car
  7: cat
  8: chair
  9: cow
  10: diningtable
  11: dog
  12: horse
  13: motorbike
  14: person
  15: pottedplant
  16: sheep
  17: sofa
  18: train
  19: tvmonitor


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import xml.etree.ElementTree as ET

  from tqdm import tqdm
  from ultralytics.yolo.utils.downloads import download
  from pathlib import Path

  def convert_label(path, lb_path, year, image_id):
      def convert_box(size, box):
          dw, dh = 1. / size[0], 1. / size[1]
          x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
          return x * dw, y * dh, w * dw, h * dh

      in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
      out_file = open(lb_path, 'w')
      tree = ET.parse(in_file)
      root = tree.getroot()
      size = root.find('size')
      w = int(size.find('width').text)
      h = int(size.find('height').text)

      names = list(yaml['names'].values())  # names list
      for obj in root.iter('object'):
          cls = obj.find('name').text
          if cls in names and int(obj.find('difficult').text) != 1:
              xmlbox = obj.find('bndbox')
              bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
              cls_id = names.index(cls)  # class id
              out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
  urls = [f'{url}VOCtrainval_06-Nov-2007.zip',  # 446MB, 5012 images
          f'{url}VOCtest_06-Nov-2007.zip',  # 438MB, 4953 images
          f'{url}VOCtrainval_11-May-2012.zip']  # 1.95GB, 17126 images
  download(urls, dir=dir / 'images', curl=True, threads=3)

  # Convert
  path = dir / 'images/VOCdevkit'
  for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
      imgs_path = dir / 'images' / f'{image_set}{year}'
      lbs_path = dir / 'labels' / f'{image_set}{year}'
      imgs_path.mkdir(exist_ok=True, parents=True)
      lbs_path.mkdir(exist_ok=True, parents=True)

      with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
          image_ids = f.read().strip().split()
      for id in tqdm(image_ids, desc=f'{image_set}{year}'):
          f = path / f'VOC{year}/JPEGImages/{id}.jpg'  # old img path
          lb_path = (lbs_path / f.name).with_suffix('.txt')  # new label path
          f.rename(imgs_path / f.name)  # move image
          convert_label(path, lb_path, year, id)  # convert labels to YOLO format
@ -1,73 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
# Example usage: python train.py --data VisDrone.yaml
# parent
# ├── yolov5
# └── datasets
#     └── VisDrone  ← downloads here (2.3 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/VisDrone  # dataset root dir
train: VisDrone2019-DET-train/images  # train images (relative to 'path') 6471 images
val: VisDrone2019-DET-val/images  # val images (relative to 'path') 548 images
test: VisDrone2019-DET-test-dev/images  # test images (optional) 1610 images

# Classes
names:
  0: pedestrian
  1: people
  2: bicycle
  3: car
  4: van
  5: truck
  6: tricycle
  7: awning-tricycle
  8: bus
  9: motor


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import os
  from pathlib import Path

  from ultralytics.yolo.utils.downloads import download

  def visdrone2yolo(dir):
      from PIL import Image
      from tqdm import tqdm

      def convert_box(size, box):
          # Convert VisDrone box to YOLO xywh box
          dw = 1. / size[0]
          dh = 1. / size[1]
          return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

      (dir / 'labels').mkdir(parents=True, exist_ok=True)  # make labels directory
      pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
      for f in pbar:
          img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
          lines = []
          with open(f, 'r') as file:  # read annotation.txt
              for row in [x.split(',') for x in file.read().strip().splitlines()]:
                  if row[4] == '0':  # VisDrone 'ignored regions' class 0
                      continue
                  cls = int(row[5]) - 1
                  box = convert_box(img_size, tuple(map(int, row[:4])))
                  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
                  with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
                      fl.writelines(lines)  # write label.txt


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
  download(urls, dir=dir, curl=True, threads=4)

  # Convert
  for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
      visdrone2yolo(dir / d)  # convert VisDrone annotations to YOLO labels
@ -1,115 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# COCO 2017 dataset http://cocodataset.org by Microsoft
# Example usage: python train.py --data coco.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco  ← downloads here (20.1 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco  # dataset root dir
train: train2017.txt  # train images (relative to 'path') 118287 images
val: val2017.txt  # val images (relative to 'path') 5000 images
test: test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: |
  from ultralytics.yolo.utils.downloads import download
  from pathlib import Path

  # Download labels
  segments = True  # segment or box labels
  dir = Path(yaml['path'])  # dataset root dir
  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels
  download(urls, dir=dir.parent)
  # Download data
  urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images
          'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images
          'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)
  download(urls, dir=dir / 'images', threads=3)
@ -1,101 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco128-seg  ← downloads here (7 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco128-seg  # dataset root dir
train: images/train2017  # train images (relative to 'path') 128 images
val: images/train2017  # val images (relative to 'path') 128 images
test:  # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco128-seg.zip
@ -1,101 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco128  ← downloads here (7 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco128  # dataset root dir
train: images/train2017  # train images (relative to 'path') 128 images
val: images/train2017  # val images (relative to 'path') 128 images
test:  # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco128.zip
@ -1,101 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco8-seg.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco8-seg  ← downloads here (1 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco8-seg  # dataset root dir
train: images/train  # train images (relative to 'path') 4 images
val: images/val  # val images (relative to 'path') 4 images
test:  # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco8-seg.zip
@ -1,101 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# COCO8 dataset (first 8 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco8.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco8  ← downloads here (1 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco8  # dataset root dir
train: images/train  # train images (relative to 'path') 4 images
val: images/val  # val images (relative to 'path') 4 images
test:  # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco8.zip
@ -1,153 +0,0 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
# --------  DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command!  --------
# Example usage: python train.py --data xView.yaml
# parent
# ├── yolov5
# └── datasets
#     └── xView  ← downloads here (20.7 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/xView  # dataset root dir
train: images/autosplit_train.txt  # train images (relative to 'path') 90% of 847 train images
val: images/autosplit_val.txt  # train images (relative to 'path') 10% of 847 train images

# Classes
names:
  0: Fixed-wing Aircraft
  1: Small Aircraft
  2: Cargo Plane
  3: Helicopter
  4: Passenger Vehicle
  5: Small Car
  6: Bus
  7: Pickup Truck
  8: Utility Truck
  9: Truck
  10: Cargo Truck
  11: Truck w/Box
  12: Truck Tractor
  13: Trailer
  14: Truck w/Flatbed
  15: Truck w/Liquid
  16: Crane Truck
  17: Railway Vehicle
  18: Passenger Car
  19: Cargo Car
  20: Flat Car
  21: Tank car
  22: Locomotive
  23: Maritime Vessel
  24: Motorboat
  25: Sailboat
  26: Tugboat
  27: Barge
  28: Fishing Vessel
  29: Ferry
  30: Yacht
  31: Container Ship
  32: Oil Tanker
  33: Engineering Vehicle
  34: Tower crane
  35: Container Crane
  36: Reach Stacker
  37: Straddle Carrier
  38: Mobile Crane
  39: Dump Truck
  40: Haul Truck
  41: Scraper/Tractor
  42: Front loader/Bulldozer
  43: Excavator
  44: Cement Mixer
  45: Ground Grader
  46: Hut/Tent
  47: Shed
  48: Building
  49: Aircraft Hangar
  50: Damaged Building
  51: Facility
  52: Construction Site
  53: Vehicle Lot
  54: Helipad
  55: Storage Tank
  56: Shipping container lot
  57: Shipping Container
  58: Pylon
  59: Tower


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import json
  import os
  from pathlib import Path

  import numpy as np
  from PIL import Image
  from tqdm import tqdm

  from ultralytics.yolo.data.dataloaders.v5loader import autosplit
  from ultralytics.yolo.utils.ops import xyxy2xywhn


  def convert_labels(fname=Path('xView/xView_train.geojson')):
      # Convert xView geoJSON labels to YOLO format
      path = fname.parent
      with open(fname) as f:
          print(f'Loading {fname}...')
          data = json.load(f)

      # Make dirs
      labels = Path(path / 'labels' / 'train')
      os.system(f'rm -rf {labels}')
      labels.mkdir(parents=True, exist_ok=True)

      # xView classes 11-94 to 0-59
      xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
                           12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
                           29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
                           47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]

      shapes = {}
      for feature in tqdm(data['features'], desc=f'Converting {fname}'):
          p = feature['properties']
          if p['bounds_imcoords']:
              id = p['image_id']
              file = path / 'train_images' / id
              if file.exists():  # 1395.tif missing
                  try:
                      box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
                      assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
                      cls = p['type_id']
                      cls = xview_class2index[int(cls)]  # xView class to 0-60
                      assert 59 >= cls >= 0, f'incorrect class index {cls}'

                      # Write YOLO label
                      if id not in shapes:
                          shapes[id] = Image.open(file).size
                      box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
                      with open((labels / id).with_suffix('.txt'), 'a') as f:
                          f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n")  # write label.txt
                  except Exception as e:
                      print(f'WARNING: skipping one label for {file}: {e}')


  # Download manually from https://challenge.xviewdataset.org
  dir = Path(yaml['path'])  # dataset root dir
  # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip',  # train labels
  #         'https://d307kc0mrhucc3.cloudfront.net/train_images.zip',  # 15G, 847 train images
  #         'https://d307kc0mrhucc3.cloudfront.net/val_images.zip']  # 5G, 282 val images (no labels)
  # download(urls, dir=dir)

  # Convert labels
  convert_labels(dir / 'xView_train.geojson')

  # Move images
  images = Path(dir / 'images')
  images.mkdir(parents=True, exist_ok=True)
  Path(dir / 'train_images').rename(dir / 'images' / 'train')
  Path(dir / 'val_images').rename(dir / 'images' / 'val')

  # Split
  autosplit(dir / 'images' / 'train')
@ -18,29 +18,28 @@ TensorFlow.js | `tfjs` | yolov8n_web_model/
PaddlePaddle | `paddle` | yolov8n_paddle_model/

Requirements:
-    $ pip install -r requirements.txt coremltools onnx onnxsim onnxruntime openvino-dev tensorflow-cpu  # CPU
-    $ pip install -r requirements.txt coremltools onnx onnxsim onnxruntime-gpu openvino-dev tensorflow  # GPU
+    $ pip install ultralytics[export]

Python:
    from ultralytics import YOLO
-    model = YOLO('yolov8n.yaml')
+    model = YOLO('yolov8n.pt')
    results = model.export(format='onnx')

CLI:
    $ yolo mode=export model=yolov8n.pt format=onnx

Inference:
-    $ python detect.py --weights yolov8n.pt  # PyTorch
-                                 yolov8n.torchscript  # TorchScript
-                                 yolov8n.onnx  # ONNX Runtime or OpenCV DNN with --dnn
-                                 yolov8n_openvino_model  # OpenVINO
-                                 yolov8n.engine  # TensorRT
-                                 yolov8n.mlmodel  # CoreML (macOS-only)
-                                 yolov8n_saved_model  # TensorFlow SavedModel
-                                 yolov8n.pb  # TensorFlow GraphDef
-                                 yolov8n.tflite  # TensorFlow Lite
-                                 yolov8n_edgetpu.tflite  # TensorFlow Edge TPU
-                                 yolov8n_paddle_model  # PaddlePaddle
+    $ yolo predict model=yolov8n.pt  # PyTorch
+                         yolov8n.torchscript  # TorchScript
+                         yolov8n.onnx  # ONNX Runtime or OpenCV DNN with --dnn
+                         yolov8n_openvino_model  # OpenVINO
+                         yolov8n.engine  # TensorRT
+                         yolov8n.mlmodel  # CoreML (macOS-only)
+                         yolov8n_saved_model  # TensorFlow SavedModel
+                         yolov8n.pb  # TensorFlow GraphDef
+                         yolov8n.tflite  # TensorFlow Lite
+                         yolov8n_edgetpu.tflite  # TensorFlow Edge TPU
+                         yolov8n_paddle_model  # PaddlePaddle

TensorFlow.js:
    $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
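Note: a Python sketch mirroring the docstring's CLI examples above; the ONNX file name assumes the default output of `export`, and the image URL is illustrative.

    # Hedged sketch: export once, then run inference on the exported artifact.
    from ultralytics import YOLO

    YOLO('yolov8n.pt').export(format='onnx')  # writes yolov8n.onnx next to the weights
    results = YOLO('yolov8n.onnx').predict('https://ultralytics.com/images/bus.jpg')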
@ -64,12 +63,12 @@ import pandas as pd
import torch

from ultralytics.nn.autobackend import check_class_names
-from ultralytics.nn.modules import Detect, Segment
+from ultralytics.nn.modules import C2f, Detect, Segment
from ultralytics.nn.tasks import DetectionModel, SegmentationModel
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages
from ultralytics.yolo.data.utils import IMAGENET_MEAN, IMAGENET_STD, check_det_dataset
-from ultralytics.yolo.utils import (DEFAULT_CFG, LINUX, LOGGER, MACOS, WINDOWS, __version__, callbacks, colorstr,
+from ultralytics.yolo.utils import (DEFAULT_CFG, LINUX, LOGGER, MACOS, __version__, callbacks, colorstr,
                                    get_default_args, yaml_save)
from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version, check_yaml
from ultralytics.yolo.utils.files import file_size
@ -77,6 +76,7 @@ from ultralytics.yolo.utils.ops import Profile
from ultralytics.yolo.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode

CUDA = torch.cuda.is_available()
+ARM64 = platform.machine() in ('arm64', 'aarch64')


def export_formats():
@ -157,11 +157,10 @@ class Exporter:

        # Load PyTorch model
        self.device = select_device('cpu' if self.args.device is None else self.args.device)
-        if self.args.half:
-            if self.device.type == 'cpu' and not coreml and not xml:
-                LOGGER.info('half=True only compatible with GPU or CoreML export, i.e. use device=0 or format=coreml')
-                self.args.half = False
-            assert not self.args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic'
+        if self.args.half and onnx and self.device.type == 'cpu':
+            LOGGER.warning('WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0')
+            self.args.half = False
+            assert not self.args.dynamic, 'half=True not compatible with dynamic=True, i.e. use only one.'

        # Checks
        model.names = check_class_names(model.names)
@ -188,11 +187,15 @@ class Exporter:
            if isinstance(m, (Detect, Segment)):
                m.dynamic = self.args.dynamic
                m.export = True
                m.format = self.args.format
+            elif isinstance(m, C2f) and not edgetpu:
+                # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
+                m.forward = m.forward_split

        y = None
        for _ in range(2):
            y = model(im)  # dry runs
-        if self.args.half and not coreml and not xml:
+        if self.args.half and (engine or onnx) and self.device.type != 'cpu':
            im, model = im.half(), model.half()  # to FP16

        # Warnings
|
||||
self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y)
|
||||
self.pretty_name = self.file.stem.replace('yolo', 'YOLO')
|
||||
self.metadata = {
|
||||
'description': f'Ultralytics {self.pretty_name} model trained on {self.args.data}',
|
||||
'description': f'Ultralytics {self.pretty_name} model trained on {Path(self.args.data).name}',
|
||||
'author': 'Ultralytics',
|
||||
'license': 'GPL-3.0 https://ultralytics.com/license',
|
||||
'version': __version__,
|
||||
@ -233,19 +236,16 @@ class Exporter:
|
||||
LOGGER.warning('WARNING ⚠️ YOLOv8 TensorFlow export is still under development. '
|
||||
'Please consider contributing to the effort if you have TF expertise. Thank you!')
|
||||
nms = False
|
||||
self.args.int8 |= edgetpu
|
||||
f[5], s_model = self._export_saved_model(nms=nms or self.args.agnostic_nms or tfjs,
|
||||
agnostic_nms=self.args.agnostic_nms or tfjs)
|
||||
if pb or tfjs: # pb prerequisite to tfjs
|
||||
f[6], _ = self._export_pb(s_model)
|
||||
if tflite or edgetpu:
|
||||
f[7] = str(Path(f[5]) / (self.file.stem + '_float16.tflite'))
|
||||
# f[7], _ = self._export_tflite(s_model,
|
||||
# int8=self.args.int8 or edgetpu,
|
||||
# data=self.args.data,
|
||||
# nms=nms,
|
||||
# agnostic_nms=self.args.agnostic_nms)
|
||||
if tflite:
|
||||
f[7], _ = self._export_tflite(s_model, nms=nms, agnostic_nms=self.args.agnostic_nms)
|
||||
if edgetpu:
|
||||
f[8], _ = self._export_edgetpu(tflite_model=f[7])
|
||||
f[8], _ = self._export_edgetpu(tflite_model=str(
|
||||
Path(f[5]) / (self.file.stem + '_full_integer_quant.tflite'))) # int8 in/out
|
||||
if tfjs:
|
||||
f[9], _ = self._export_tfjs()
|
||||
if paddle: # PaddlePaddle
|
||||
@ -263,8 +263,8 @@ class Exporter:
|
||||
LOGGER.info(
|
||||
f'\nExport complete ({time.time() - t:.1f}s)'
|
||||
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
|
||||
f'\nPredict: yolo task={model.task} mode=predict model={f} imgsz={imgsz} {data}'
|
||||
f'\nValidate: yolo task={model.task} mode=val model={f} imgsz={imgsz} data={self.args.data} {s}'
|
||||
f'\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {data}'
|
||||
f'\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={self.args.data} {s}'
|
||||
f'\nVisualize: https://netron.app')
|
||||
|
||||
self.run_callbacks('on_export_end')
|
||||
@ -319,25 +319,27 @@ class Exporter:
|
||||
|
||||
# Checks
|
||||
model_onnx = onnx.load(f) # load onnx model
|
||||
onnx.checker.check_model(model_onnx) # check onnx model
|
||||
|
||||
# Metadata
|
||||
d = {'stride': int(max(self.model.stride)), 'names': self.model.names}
|
||||
for k, v in d.items():
|
||||
meta = model_onnx.metadata_props.add()
|
||||
meta.key, meta.value = k, str(v)
|
||||
onnx.save(model_onnx, f)
|
||||
# onnx.checker.check_model(model_onnx) # check onnx model
|
||||
|
||||
# Simplify
|
||||
if self.args.simplify:
|
||||
try:
|
||||
check_requirements('onnxsim')
|
||||
check_requirements(('onnxsim', 'onnxruntime-gpu' if CUDA else 'onnxruntime'))
|
||||
import onnxsim
|
||||
|
||||
LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...')
|
||||
subprocess.run(f'onnxsim {f} {f}', shell=True)
|
||||
# subprocess.run(f'onnxsim {f} {f}', shell=True)
|
||||
model_onnx, check = onnxsim.simplify(model_onnx)
|
||||
assert check, 'Simplified ONNX model could not be validated'
|
||||
except Exception as e:
|
||||
LOGGER.info(f'{prefix} simplifier failure: {e}')
|
||||
|
||||
# Metadata
|
||||
for k, v in self.metadata.items():
|
||||
meta = model_onnx.metadata_props.add()
|
||||
meta.key, meta.value = k, str(v)
|
||||
|
||||
onnx.save(model_onnx, f)
|
||||
return f, model_onnx
|
||||
|
||||
@try_export
|
||||
@ -402,7 +404,7 @@ class Exporter:
|
||||
if self.model.task == 'classify':
|
||||
bias = [-x for x in IMAGENET_MEAN]
|
||||
scale = 1 / 255 / (sum(IMAGENET_STD) / 3)
|
||||
classifier_config = ct.ClassifierConfig(list(self.model.names.values()))
|
||||
classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None
|
||||
else:
|
||||
bias = [0.0, 0.0, 0.0]
|
||||
scale = 1 / 255
|
||||
@ -414,10 +416,7 @@ class Exporter:
|
||||
classifier_config=classifier_config)
|
||||
bits, mode = (8, 'kmeans_lut') if self.args.int8 else (16, 'linear') if self.args.half else (32, None)
|
||||
if bits < 32:
|
||||
if MACOS: # quantization only supported on macOS
|
||||
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
|
||||
else:
|
||||
LOGGER.info(f'{prefix} quantization only supported on macOS, skipping...')
|
||||
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
|
||||
if self.args.nms:
|
||||
ct_model = self._pipeline_coreml(ct_model)
|
||||
|
||||
@ -440,11 +439,11 @@ class Exporter:
|
||||
import tensorrt as trt # noqa
|
||||
|
||||
check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=8.0.0
|
||||
self._export_onnx()
|
||||
onnx = self.file.with_suffix('.onnx')
|
||||
self.args.simplify = True
|
||||
f_onnx, _ = self._export_onnx()
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
|
||||
assert onnx.exists(), f'failed to export ONNX file: {onnx}'
|
||||
assert Path(f_onnx).exists(), f'failed to export ONNX file: {f_onnx}'
|
||||
f = self.file.with_suffix('.engine') # TensorRT engine file
|
||||
logger = trt.Logger(trt.Logger.INFO)
|
||||
if verbose:
|
||||
@ -458,8 +457,8 @@ class Exporter:
|
||||
flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
|
||||
network = builder.create_network(flag)
|
||||
parser = trt.OnnxParser(network, logger)
|
||||
if not parser.parse_from_file(str(onnx)):
|
||||
raise RuntimeError(f'failed to load ONNX file: {onnx}')
|
||||
if not parser.parse_from_file(f_onnx):
|
||||
raise RuntimeError(f'failed to load ONNX file: {f_onnx}')
|
||||
|
||||
inputs = [network.get_input(i) for i in range(network.num_inputs)]
|
||||
outputs = [network.get_output(i) for i in range(network.num_outputs)]
|
||||
@ -507,77 +506,37 @@ class Exporter:
        try:
            import tensorflow as tf  # noqa
        except ImportError:
            check_requirements(f"tensorflow{'' if CUDA else '-macos' if MACOS else '-cpu' if LINUX else ''}")
            check_requirements(f"tensorflow{'-macos' if MACOS else '-aarch64' if ARM64 else '' if CUDA else '-cpu'}")
            import tensorflow as tf  # noqa
        check_requirements(('onnx', 'onnx2tf', 'sng4onnx', 'onnxsim', 'onnx_graphsurgeon', 'tflite_support'),
        check_requirements(('onnx', 'onnx2tf', 'sng4onnx', 'onnxsim', 'onnx_graphsurgeon', 'tflite_support',
                            'onnxruntime-gpu' if CUDA else 'onnxruntime'),
                           cmds='--extra-index-url https://pypi.ngc.nvidia.com')

        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
        f = str(self.file).replace(self.file.suffix, '_saved_model')
        f = Path(str(self.file).replace(self.file.suffix, '_saved_model'))
        if f.is_dir():
            import shutil
            shutil.rmtree(f)  # delete output folder

        # Export to ONNX
        self._export_onnx()
        onnx = self.file.with_suffix('.onnx')
        self.args.simplify = True
        f_onnx, _ = self._export_onnx()

        # Export to TF SavedModel
        subprocess.run(f'onnx2tf -i {onnx} -o {f} --non_verbose', shell=True)
        yaml_save(Path(f) / 'metadata.yaml', self.metadata)  # add metadata.yaml
        # Export to TF
        int8 = '-oiqt -qt per-tensor' if self.args.int8 else ''
        cmd = f'onnx2tf -i {f_onnx} -o {f} --non_verbose {int8}'
        LOGGER.info(f'\n{prefix} running {cmd}')
        subprocess.run(cmd, shell=True)
        yaml_save(f / 'metadata.yaml', self.metadata)  # add metadata.yaml

        # Add TFLite metadata
        for file in Path(f).rglob('*.tflite'):
        for file in f.rglob('*.tflite'):
            self._add_tflite_metadata(file)

        # Load saved_model
        keras_model = tf.saved_model.load(f, tags=None, options=None)

        return f, keras_model
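Because the export returns a tf.saved_model handle, the output folder can also be reloaded directly; a minimal sketch (the directory name is illustrative, and the BHWC layout assumes onnx2tf's default NCHW-to-NHWC transposition):

    import tensorflow as tf

    m = tf.saved_model.load('yolov8n_saved_model')  # illustrative directory
    im = tf.zeros((1, 640, 640, 3))  # BHWC dummy input
    y = m(im)  # run inference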
    @try_export
    def _export_saved_model_OLD(self,
                                nms=False,
                                agnostic_nms=False,
                                topk_per_class=100,
                                topk_all=100,
                                iou_thres=0.45,
                                conf_thres=0.25,
                                prefix=colorstr('TensorFlow SavedModel:')):
        # YOLOv8 TensorFlow SavedModel export
        try:
            import tensorflow as tf  # noqa
        except ImportError:
            check_requirements(f"tensorflow{'' if CUDA else '-macos' if MACOS else '-cpu' if LINUX else ''}")
            import tensorflow as tf  # noqa
        # from models.tf import TFModel
        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2  # noqa

        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
        f = str(self.file).replace(self.file.suffix, '_saved_model')
        batch_size, ch, *imgsz = list(self.im.shape)  # BCHW

        tf_models = None  # TODO: no TF modules available
        tf_model = tf_models.TFModel(cfg=self.model.yaml, model=self.model.cpu(), nc=self.model.nc, imgsz=imgsz)
        im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
        _ = tf_model.predict(im, nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
        inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if self.args.dynamic else batch_size)
        outputs = tf_model.predict(inputs, nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
        keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
        keras_model.trainable = False
        keras_model.summary()
        if self.args.keras:
            keras_model.save(f, save_format='tf')
        else:
            spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
            m = tf.function(lambda x: keras_model(x))  # full model
            m = m.get_concrete_function(spec)
            frozen_func = convert_variables_to_constants_v2(m)
            tfm = tf.Module()
            tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if nms else frozen_func(x), [spec])
            tfm.__call__(im)
            tf.saved_model.save(tfm,
                                f,
                                options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
                                if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
        return f, keras_model
        return str(f), keras_model
    @try_export
    def _export_pb(self, keras_model, prefix=colorstr('TensorFlow GraphDef:')):
@ -596,8 +555,18 @@ class Exporter:
        return f, None

    @try_export
    def _export_tflite(self, keras_model, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
    def _export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
        # YOLOv8 TensorFlow Lite export
        saved_model = Path(str(self.file).replace(self.file.suffix, '_saved_model'))
        if self.args.int8:
            f = saved_model / (self.file.stem + '_integer_quant.tflite')  # fp32 in/out
        elif self.args.half:
            f = saved_model / (self.file.stem + '_float16.tflite')
        else:
            f = saved_model / (self.file.stem + '_float32.tflite')
        return str(f), None  # noqa
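With this change _export_tflite no longer converts anything itself; it simply resolves the filename of the .tflite file that onnx2tf already wrote during the SavedModel step. A sketch of loading such a file with the standard tf.lite API (illustrative path):

    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path='yolov8n_saved_model/yolov8n_float32.tflite')  # illustrative
    interpreter.allocate_tensors()
    print(interpreter.get_input_details()[0]['shape'])  # e.g. [1, 640, 640, 3]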
        # OLD VERSION BELOW ---------------------------------------------------------------
        import tensorflow as tf  # noqa

        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
@ -608,7 +577,7 @@ class Exporter:
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
        converter.target_spec.supported_types = [tf.float16]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        if int8:
        if self.args.int8:

            def representative_dataset_gen(dataset, n_images=100):
                # Dataset generator for use with converter.representative_dataset, returns a generator of np arrays
@ -620,7 +589,7 @@ class Exporter:
                    if n >= n_images:
                        break

            dataset = LoadImages(check_det_dataset(check_yaml(data))['train'], imgsz=imgsz, auto=False)
            dataset = LoadImages(check_det_dataset(check_yaml(self.args.data))['train'], imgsz=imgsz, auto=False)
            converter.representative_dataset = lambda: representative_dataset_gen(dataset, n_images=100)
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.target_spec.supported_types = []
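The representative dataset is what calibrates activation ranges for full-INT8 conversion. A self-contained sketch of the same pattern with random data (real exports feed preprocessed training images instead; the SavedModel path is illustrative):

    import numpy as np
    import tensorflow as tf

    def representative_dataset():
        for _ in range(100):
            # calibration samples must match the model's input shape and float32 dtype
            yield [np.random.rand(1, 640, 640, 3).astype(np.float32)]

    converter = tf.lite.TFLiteConverter.from_saved_model('yolov8n_saved_model')  # illustrative
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]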
@ -641,7 +610,7 @@ class Exporter:
        cmd = 'edgetpu_compiler --version'
        help_url = 'https://coral.ai/docs/edgetpu/compiler/'
        assert LINUX, f'export only supported on Linux. See {help_url}'
        if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
        if subprocess.run(f'{cmd} > /dev/null', shell=True).returncode != 0:
            LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
            sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
            for c in (
@ -656,7 +625,7 @@ class Exporter:
        LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
        f = str(tflite_model).replace('.tflite', '_edgetpu.tflite')  # Edge TPU model

        cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {self.file.parent} {tflite_model}'
        cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {Path(f).parent} {tflite_model}'
        subprocess.run(cmd.split(), check=True)
        self._add_tflite_metadata(f)
        return f, None
@ -674,7 +643,7 @@ class Exporter:

        cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
              f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
        subprocess.run(cmd.split())
        subprocess.run(cmd.split(), check=True)

        with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
            subst = re.sub(
@ -698,14 +667,23 @@ class Exporter:
        from tflite_support import metadata as _metadata  # noqa
        from tflite_support import metadata_schema_py_generated as _metadata_fb  # noqa

        # Creates model info.
        # Create model info
        model_meta = _metadata_fb.ModelMetadataT()
        model_meta.name = self.metadata['description']
        model_meta.version = self.metadata['version']
        model_meta.author = self.metadata['author']
        model_meta.license = self.metadata['license']

        # Creates input info.
        # Label file
        tmp_file = file.parent / 'temp_meta.txt'
        with open(tmp_file, 'w') as f:
            f.write(str(self.metadata))

        label_file = _metadata_fb.AssociatedFileT()
        label_file.name = tmp_file.name
        label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS

        # Create input info
        input_meta = _metadata_fb.TensorMetadataT()
        input_meta.name = 'image'
        input_meta.description = 'Input image to be detected.'
@ -714,25 +692,21 @@ class Exporter:
        input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB
        input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties

        # Creates output info.
        output_meta = _metadata_fb.TensorMetadataT()
        output_meta.name = 'output'
        output_meta.description = 'Coordinates of detected objects, class labels, and confidence score.'
        # Create output info
        output1 = _metadata_fb.TensorMetadataT()
        output1.name = 'output'
        output1.description = 'Coordinates of detected objects, class labels, and confidence score'
        output1.associatedFiles = [label_file]
        if self.model.task == 'segment':
            output2 = _metadata_fb.TensorMetadataT()
            output2.name = 'output'
            output2.description = 'Mask protos'
            output2.associatedFiles = [label_file]

        # Label file
        tmp_file = Path('/tmp/meta.txt')
        with open(tmp_file, 'w') as meta_f:
            meta_f.write(str(self.metadata))

        label_file = _metadata_fb.AssociatedFileT()
        label_file.name = tmp_file.name
        label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
        output_meta.associatedFiles = [label_file]

        # Creates subgraph info.
        # Create subgraph info
        subgraph = _metadata_fb.SubGraphMetadataT()
        subgraph.inputTensorMetadata = [input_meta]
        subgraph.outputTensorMetadata = [output_meta]
        subgraph.outputTensorMetadata = [output1, output2] if self.model.task == 'segment' else [output1]
        model_meta.subgraphMetadata = [subgraph]

        b = flatbuffers.Builder(0)
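Once the flatbuffer is attached, the same tflite_support package can verify what was embedded; a brief sketch (illustrative path):

    from tflite_support import metadata

    displayer = metadata.MetadataDisplayer.with_model_file('yolov8n_float32.tflite')  # illustrative
    print(displayer.get_metadata_json())  # model/input/output descriptions written above
    print(displayer.get_packed_associated_file_list())  # should include temp_meta.txt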
@ -29,14 +29,45 @@ MODEL_MAP = {

class YOLO:
    """
    YOLO
    YOLO (You Only Look Once) object detection model.

    A python interface which emulates a model-like behaviour by wrapping trainers.
    """

    Args:
        model (str or Path): Path to the model file to load or create.
        type (str): Type/version of models to use. Defaults to "v8".

    Attributes:
        type (str): Type/version of models being used.
        ModelClass (Any): Model class.
        TrainerClass (Any): Trainer class.
        ValidatorClass (Any): Validator class.
        PredictorClass (Any): Predictor class.
        predictor (Any): Predictor object.
        model (Any): Model object.
        trainer (Any): Trainer object.
        task (str): Type of model task.
        ckpt (Any): Checkpoint object if model loaded from *.pt file.
        cfg (str): Model configuration if loaded from *.yaml file.
        ckpt_path (str): Checkpoint file path.
        overrides (dict): Overrides for trainer object.
        metrics_data (Any): Data for metrics.

    Methods:
        __call__(): Alias for predict method.
        _new(cfg, verbose=True): Initializes a new model and infers the task type from the model definitions.
        _load(weights): Initializes a new model and infers the task type from the model head.
        _check_is_pytorch_model(): Raises TypeError if model is not a PyTorch model.
        reset(): Resets the model modules.
        info(verbose=False): Logs model info.
        fuse(): Fuse model for faster inference.
        predict(source=None, stream=False, **kwargs): Perform prediction using the YOLO model.

    Returns:
        List[ultralytics.yolo.engine.results.Results]: The prediction results.
    """

    def __init__(self, model='yolov8n.pt', type='v8') -> None:
        """
        Initializes the YOLO object.
        Initializes the YOLO model.

        Args:
            model (str, Path): model to load or create
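The docstring above maps to the package's documented entry point; typical usage is:

    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')  # load a pretrained model
    results = model.predict('https://ultralytics.com/images/bus.jpg')  # list of Results
    model.export(format='onnx')  # any format from the export table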
@ -97,11 +128,12 @@ class YOLO:
            self.task = self.model.args['task']
            self.overrides = self.model.args
            self._reset_ckpt_args(self.overrides)
            self.ckpt_path = self.model.pt_path
        else:
            check_file(weights)
            weights = check_file(weights)
            self.model, self.ckpt = weights, None
            self.task = guess_model_task(weights)
            self.ckpt_path = weights
            self.ckpt_path = weights
        self.overrides['model'] = weights
        self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = self._assign_ops_from_task()
@ -204,7 +236,6 @@ class YOLO:

        return validator.metrics

    @smart_inference_mode()
    def export(self, **kwargs):
        """
        Export model.
@ -279,6 +310,13 @@ class YOLO:
        """
        return self.model.names if hasattr(self.model, 'names') else None

    @property
    def device(self):
        """
        Returns device if PyTorch model
        """
        return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None

    @property
    def transforms(self):
        """
@ -293,7 +331,6 @@ class YOLO:
        """
        if not self.metrics_data:
            LOGGER.info('No metrics data found! Run training or validation operation first.')

        return self.metrics_data

    @staticmethod
@ -306,7 +343,7 @@ class YOLO:
    @staticmethod
    def _reset_ckpt_args(args):
        for arg in 'augment', 'verbose', 'project', 'name', 'exist_ok', 'resume', 'batch', 'epochs', 'cache', \
                'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots', 'opset':
                'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots', 'opset', 'simplify':
            args.pop(arg, None)

    @staticmethod
@ -1,30 +1,32 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ yolo task=... mode=predict model=s.pt --source 0          # webcam
                                            img.jpg             # image
                                            vid.mp4             # video
                                            screen              # screenshot
                                            path/               # directory
                                            list.txt            # list of images
                                            list.streams        # list of streams
                                            'path/*.jpg'        # glob
                                            'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
    $ yolo mode=predict model=yolov8n.pt --source 0             # webcam
                                         img.jpg                # image
                                         vid.mp4                # video
                                         screen                 # screenshot
                                         path/                  # directory
                                         list.txt               # list of images
                                         list.streams           # list of streams
                                         'path/*.jpg'           # glob
                                         'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                         'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ yolo task=... mode=predict --weights yolov8n.pt               # PyTorch
                                           yolov8n.torchscript      # TorchScript
                                           yolov8n.onnx             # ONNX Runtime or OpenCV DNN with --dnn
                                           yolov8n_openvino_model   # OpenVINO
                                           yolov8n.engine           # TensorRT
                                           yolov8n.mlmodel          # CoreML (macOS-only)
                                           yolov8n_saved_model      # TensorFlow SavedModel
                                           yolov8n.pb               # TensorFlow GraphDef
                                           yolov8n.tflite           # TensorFlow Lite
                                           yolov8n_edgetpu.tflite   # TensorFlow Edge TPU
                                           yolov8n_paddle_model     # PaddlePaddle
"""
    $ yolo mode=predict model=yolov8n.pt                 # PyTorch
                              yolov8n.torchscript        # TorchScript
                              yolov8n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
                              yolov8n_openvino_model     # OpenVINO
                              yolov8n.engine             # TensorRT
                              yolov8n.mlmodel            # CoreML (macOS-only)
                              yolov8n_saved_model        # TensorFlow SavedModel
                              yolov8n.pb                 # TensorFlow GraphDef
                              yolov8n.tflite             # TensorFlow Lite
                              yolov8n_edgetpu.tflite     # TensorFlow Edge TPU
                              yolov8n_paddle_model       # PaddlePaddle
"""
import platform
from collections import defaultdict
from pathlib import Path
@ -200,9 +202,9 @@ class BasePredictor:
        # Print results
        if self.args.verbose and self.seen:
            t = tuple(x.t / self.seen * 1E3 for x in self.dt)  # speeds per image
            LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms postprocess per image at shape '
            LOGGER.info(f'Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape '
                        f'{(1, 3, *self.imgsz)}' % t)
        if self.args.save_txt or self.args.save:
        if self.args.save or self.args.save_txt or self.args.save_crop:
            nl = len(list(self.save_dir.glob('labels/*.txt')))  # number of labels
            s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else ''
            LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}")
@ -4,7 +4,6 @@ from functools import lru_cache
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image

from ultralytics.yolo.utils import LOGGER, ops
from ultralytics.yolo.utils.plotting import Annotator, colors
@ -136,7 +135,7 @@ class Results:
        img = deepcopy(self.orig_img)
        annotator = Annotator(img, line_width, font_size, font, pil, example)
        boxes = self.boxes
        masks = self.masks.data
        masks = self.masks
        logits = self.probs
        names = self.names
        if boxes is not None:
@ -1,8 +1,10 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
Simple training loop; Boilerplate that could apply to any arbitrary neural network,
"""
Train a model on a dataset

Usage:
    $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16
"""
import os
import subprocess
import time
@ -1,5 +1,23 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
Check a model's accuracy on a test or val split of a dataset

Usage:
    $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640

Usage - formats:
    $ yolo mode=val model=yolov8n.pt                 # PyTorch
                          yolov8n.torchscript        # TorchScript
                          yolov8n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
                          yolov8n_openvino_model     # OpenVINO
                          yolov8n.engine             # TensorRT
                          yolov8n.mlmodel            # CoreML (macOS-only)
                          yolov8n_saved_model        # TensorFlow SavedModel
                          yolov8n.pb                 # TensorFlow GraphDef
                          yolov8n.tflite             # TensorFlow Lite
                          yolov8n_edgetpu.tflite     # TensorFlow Edge TPU
                          yolov8n_paddle_model       # PaddlePaddle
"""
import json
from collections import defaultdict
from pathlib import Path
@ -105,8 +123,7 @@ class BaseValidator:
            self.device = model.device
            if not pt and not jit:
                self.args.batch = 1  # export.py models default to batch-size 1
                self.logger.info(
                    f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
                self.logger.info(f'Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

            if isinstance(self.args.data, str) and self.args.data.endswith('.yaml'):
                self.data = check_det_dataset(self.args.data)
@ -136,7 +153,7 @@ class BaseValidator:
        for batch_i, batch in enumerate(bar):
            self.run_callbacks('on_val_batch_start')
            self.batch_i = batch_i
            # pre-process
            # preprocess
            with dt[0]:
                batch = self.preprocess(batch)

@ -149,7 +166,7 @@ class BaseValidator:
            if self.training:
                self.loss += trainer.criterion(preds, batch)[1]

            # pre-process predictions
            # postprocess
            with dt[3]:
                preds = self.postprocess(preds)

@ -163,13 +180,14 @@ class BaseValidator:
        self.check_stats(stats)
        self.print_results()
        self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt)  # speeds per image
        self.finalize_metrics()
        self.run_callbacks('on_val_end')
        if self.training:
            model.float()
            results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix='val')}
            return {k: round(float(v), 5) for k, v in results.items()}  # return results as 5 decimal place floats
        else:
            self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' %
            self.logger.info('Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image' %
                             self.speed)
            if self.args.save_json and self.jdict:
                with open(str(self.save_dir / 'predictions.json'), 'w') as f:
@ -197,6 +215,9 @@ class BaseValidator:
    def update_metrics(self, preds, batch):
        pass

    def finalize_metrics(self, *args, **kwargs):
        pass

    def get_stats(self):
        return {}
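The new finalize_metrics hook is a no-op in the base class by design: it runs once after self.speed has been measured, so task validators can push timing into their metrics objects. The v8 validators further down all override it the same way; a sketch of the pattern with a hypothetical subclass:

    class MyValidator(BaseValidator):
        def finalize_metrics(self, *args, **kwargs):
            # copy per-image speeds into the metrics object once validation ends
            self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))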
@ -97,6 +97,7 @@ HELP_MSG = \
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
pd.options.display.width = 120
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'  # for deterministic training
@ -287,9 +288,7 @@ def is_pytest_running():
    Returns:
        (bool): True if pytest is running, False otherwise.
    """
    with contextlib.suppress(Exception):
        return 'pytest' in sys.modules
    return False
    return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem)


def is_github_actions_ci() -> bool:
@ -530,8 +529,7 @@ def set_sentry():
    if SETTINGS['sync'] and \
            RANK in {-1, 0} and \
            Path(sys.argv[0]).name == 'yolo' and \
            not is_pytest_running() and \
            not is_github_actions_ci() and \
            not TESTS_RUNNING and \
            ((is_pip_package() and not is_git_dir()) or
             (get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git' and get_git_branch() == 'main')):
@ -625,4 +623,5 @@ SETTINGS = get_settings()
DATASETS_DIR = Path(SETTINGS['datasets_dir'])  # global datasets directory
ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \
    'Docker' if is_docker() else platform.system()
TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
set_sentry()
ultralytics/yolo/utils/benchmarks.py (new file, 101 lines)
@ -0,0 +1,101 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
Benchmark YOLO model export formats for speed and accuracy

Usage:
    from ultralytics.yolo.utils.benchmarks import run_benchmarks
    run_benchmarks(model='yolov8n.pt', imgsz=160)

Format | `format=argument` | Model
--- | --- | ---
PyTorch | - | yolov8n.pt
TorchScript | `torchscript` | yolov8n.torchscript
ONNX | `onnx` | yolov8n.onnx
OpenVINO | `openvino` | yolov8n_openvino_model/
TensorRT | `engine` | yolov8n.engine
CoreML | `coreml` | yolov8n.mlmodel
TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/
TensorFlow GraphDef | `pb` | yolov8n.pb
TensorFlow Lite | `tflite` | yolov8n.tflite
TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite
TensorFlow.js | `tfjs` | yolov8n_web_model/
PaddlePaddle | `paddle` | yolov8n_paddle_model/
"""

import platform
import time
from pathlib import Path

import pandas as pd
import torch

from ultralytics import YOLO
from ultralytics.yolo.engine.exporter import export_formats
from ultralytics.yolo.utils import LOGGER, SETTINGS
from ultralytics.yolo.utils.checks import check_yolo
from ultralytics.yolo.utils.files import file_size


def run_benchmarks(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
                   imgsz=640,
                   half=False,
                   device='cpu',
                   hard_fail=False):
    device = torch.device(int(device) if device.isnumeric() else device)
    model = YOLO(model)

    y = []
    t0 = time.time()
    for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
        try:
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML

            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if format == '-':
                filename = model.ckpt_path
                export = model  # PyTorch format
            else:
                filename = model.export(imgsz=imgsz, format=format, half=half, device=device)  # all others
                export = YOLO(filename)
            assert suffix in str(filename), 'export failed'

            # Validate
            if model.task == 'detect':
                data, key = 'coco128.yaml', 'metrics/mAP50-95(B)'
            elif model.task == 'segment':
                data, key = 'coco128-seg.yaml', 'metrics/mAP50-95(M)'
            elif model.task == 'classify':
                data, key = 'imagenet100', 'metrics/accuracy_top5'

            results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False)
            metric, speed = results.results_dict[key], results.speed['inference']
            y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
            LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
            y.append([name, '❌', None, None, None])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    check_yolo(device=device)  # print system info
    c = ['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'] if map else ['Format', 'Export', '', '']
    df = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete for {Path(model.ckpt_path).name} on {data} at imgsz={imgsz} '
                f'({time.time() - t0:.2f}s)')
    LOGGER.info(str(df if map else df.iloc[:, :2]))

    if hard_fail and isinstance(hard_fail, str):
        metrics = df[key].array  # values to compare to floor
        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: metric < floor {floor}'


if __name__ == '__main__':
    run_benchmarks()
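When hard_fail is a string it is evaluated as a numeric metric floor, so CI can assert a minimum accuracy across all formats that exported successfully; an illustrative call (the 0.20 floor is an assumption, not a project threshold):

    from ultralytics.yolo.utils.benchmarks import run_benchmarks

    # fail the run if any successful format scores below the floor
    run_benchmarks(model='yolov8n.pt', imgsz=160, hard_fail='0.20')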
@ -1,5 +1,5 @@
# Ultralytics YOLO 🚀, GPL-3.0 license

from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params

try:
@ -7,6 +7,7 @@ try:
    from clearml import Task

    assert clearml.__version__  # verify package is not directory
    assert not TESTS_RUNNING  # do not log pytest
except (ImportError, AssertionError):
    clearml = None

@ -19,14 +20,16 @@ def _log_images(imgs_dict, group='', step=0):


def on_pretrain_routine_start(trainer):
    # TODO: reuse existing task
    task = Task.init(project_name=trainer.args.project or 'YOLOv8',
                     task_name=trainer.args.name,
                     tags=['YOLOv8'],
                     output_uri=True,
                     reuse_last_task_id=False,
                     auto_connect_frameworks={'pytorch': False})
    task.connect(vars(trainer.args), name='General')
    try:
        task = Task.init(project_name=trainer.args.project or 'YOLOv8',
                         task_name=trainer.args.name,
                         tags=['YOLOv8'],
                         output_uri=True,
                         reuse_last_task_id=False,
                         auto_connect_frameworks={'pytorch': False})
        task.connect(vars(trainer.args), name='General')
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ ClearML not initialized correctly, not logging this run. {e}')


def on_train_epoch_end(trainer):
@ -35,18 +38,19 @@ def on_train_epoch_end(trainer):


def on_fit_epoch_end(trainer):
    if trainer.epoch == 0:
    task = Task.current_task()
    if task and trainer.epoch == 0:
        model_info = {
            'Parameters': get_num_params(trainer.model),
            'GFLOPs': round(get_flops(trainer.model), 3),
            'Inference speed (ms/img)': round(trainer.validator.speed[1], 3)}
        Task.current_task().connect(model_info, name='Model')
        task.connect(model_info, name='Model')


def on_train_end(trainer):
    Task.current_task().update_output_model(model_path=str(trainer.best),
                                            model_name=trainer.args.name,
                                            auto_delete_file=False)
    task = Task.current_task()
    if task:
        task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)


callbacks = {
@ -1,41 +1,49 @@
# Ultralytics YOLO 🚀, GPL-3.0 license

from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params

try:
    import comet_ml

except ImportError:
    assert not TESTS_RUNNING  # do not log pytest
    assert comet_ml.__version__  # verify package is not directory
except (ImportError, AssertionError):
    comet_ml = None


def on_pretrain_routine_start(trainer):
    experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8')
    experiment.log_parameters(vars(trainer.args))
    try:
        experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8')
        experiment.log_parameters(vars(trainer.args))
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ Comet not initialized correctly, not logging this run. {e}')


def on_train_epoch_end(trainer):
    experiment = comet_ml.get_global_experiment()
    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
    if trainer.epoch == 1:
        for f in trainer.save_dir.glob('train_batch*.jpg'):
            experiment.log_image(f, name=f.stem, step=trainer.epoch + 1)
    if experiment:
        experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
        if trainer.epoch == 1:
            for f in trainer.save_dir.glob('train_batch*.jpg'):
                experiment.log_image(f, name=f.stem, step=trainer.epoch + 1)


def on_fit_epoch_end(trainer):
    experiment = comet_ml.get_global_experiment()
    experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1)
    if trainer.epoch == 0:
        model_info = {
            'model/parameters': get_num_params(trainer.model),
            'model/GFLOPs': round(get_flops(trainer.model), 3),
            'model/speed(ms)': round(trainer.validator.speed[1], 3)}
        experiment.log_metrics(model_info, step=trainer.epoch + 1)
    if experiment:
        experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1)
        if trainer.epoch == 0:
            model_info = {
                'model/parameters': get_num_params(trainer.model),
                'model/GFLOPs': round(get_flops(trainer.model), 3),
                'model/speed(ms)': round(trainer.validator.speed[1], 3)}
            experiment.log_metrics(model_info, step=trainer.epoch + 1)


def on_train_end(trainer):
    experiment = comet_ml.get_global_experiment()
    experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True)
    if experiment:
        experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True)


callbacks = {
@ -4,11 +4,11 @@ import json
from time import time

from ultralytics.hub.utils import PREFIX, traces
from ultralytics.yolo.utils import LOGGER
from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING


def on_pretrain_routine_end(trainer):
    session = getattr(trainer, 'hub_session', None)
    session = not TESTS_RUNNING and getattr(trainer, 'hub_session', None)
    if session:
        # Start timer for upload rate limit
        LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
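All three logger integrations above now follow the same defensive pattern: import inside a try block with assertions (package present, not a bare directory on the path, not running under pytest), fall back to None, and make every callback a no-op when the backend is unavailable or failed to initialize. A distilled sketch of the pattern, with a hypothetical some_logger package standing in for clearml or comet_ml:

    try:
        import some_logger  # hypothetical integration package
        assert some_logger.__version__  # verify package is not a bare directory
        assert not TESTS_RUNNING  # do not log pytest runs
    except (ImportError, AssertionError):
        some_logger = None


    def on_train_end(trainer):
        if some_logger:  # silently skip when the integration is absent
            some_logger.log_model(str(trainer.best))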
@ -194,8 +194,12 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=()
        try:
            pkg.require(r)
        except (pkg.VersionConflict, pkg.DistributionNotFound):  # exception if requirements not met
            s += f'"{r}" '
            n += 1
            try:  # attempt to import (slower but more accurate)
                import importlib
                importlib.import_module(next(pkg.parse_requirements(r)).name)
            except ImportError:
                s += f'"{r}" '
                n += 1

    if s and install and AUTOINSTALL:  # check environment variable
        LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
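The new import fallback avoids false positives when a package is importable but its pip metadata is missing (e.g. system installs). Note it assumes the distribution name matches the import name, which is not always true; a small illustration of the mapping it relies on:

    import importlib
    import pkg_resources as pkg

    r = 'numpy>=1.22.0'
    name = next(pkg.parse_requirements(r)).name  # 'numpy', importable as-is
    importlib.import_module(name)  # succeeds, requirement treated as met

    # Counter-example: 'opencv-python' parses to a name that differs from its
    # import name ('cv2'), so the fallback would still report it as missing.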
@ -250,7 +254,7 @@ def check_file(file, suffix='', download=True):
        return file
    else:  # search
        files = []
        for d in 'models', 'yolo/data', 'tracker/cfg':  # search directories
        for d in 'models', 'datasets', 'tracker/cfg':  # search directories
            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
        if not files:
            raise FileNotFoundError(f"'{file}' does not exist")
@ -280,7 +284,7 @@ def check_imshow(warn=False):
    return False


def check_yolo(verbose=True):
def check_yolo(verbose=True, device=''):
    from ultralytics.yolo.utils.torch_utils import select_device

    if is_colab():
@ -298,7 +302,7 @@ def check_yolo(verbose=True):
    else:
        s = ''

    select_device(newline=False)
    select_device(device=device, newline=False)
    LOGGER.info(f'Setup complete ✅ {s}')
@ -512,6 +512,7 @@ class DetMetrics:
        self.plot = plot
        self.names = names
        self.box = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}

    def process(self, tp, conf, pred_cls, target_cls):
        results = ap_per_class(tp, conf, pred_cls, target_cls, plot=self.plot, save_dir=self.save_dir,
@ -554,6 +555,7 @@ class SegmentMetrics:
        self.names = names
        self.box = Metric()
        self.seg = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}

    def process(self, tp_m, tp_b, conf, pred_cls, target_cls):
        results_mask = ap_per_class(tp_m,
@ -612,6 +614,7 @@ class ClassifyMetrics:
    def __init__(self) -> None:
        self.top1 = 0
        self.top5 = 0
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}

    def process(self, targets, pred):
        # target classes and predicted classes
@ -154,7 +154,7 @@ class Annotator:

def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    xyxy = torch.Tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
@ -223,7 +223,7 @@ def make_anchors(feats, strides, grid_cell_offset=0.5):

def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
    """Transform distance(ltrb) to box(xywh or xyxy)."""
    lt, rb = torch.split(distance, 2, dim)
    lt, rb = distance.chunk(2, dim)
    x1y1 = anchor_points - lt
    x2y2 = anchor_points + rb
    if xywh:
@ -235,5 +235,5 @@ def dist2bbox(distance, anchor_points, xywh=True, dim=-1):

def bbox2dist(anchor_points, bbox, reg_max):
    """Transform bbox(xyxy) to dist(ltrb)."""
    x1y1, x2y2 = torch.split(bbox, 2, -1)
    x1y1, x2y2 = bbox.chunk(2, -1)
    return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01)  # dist (lt, rb)
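torch.split(x, 2, dim) with a size-4 axis and x.chunk(2, dim) produce identical halves (split takes a piece size, chunk takes a piece count), so the swap above is behavior-preserving; a quick check:

    import torch

    x = torch.arange(8.).reshape(2, 4)  # last dim holds (l, t, r, b)
    a1, b1 = torch.split(x, 2, -1)      # pieces of size 2
    a2, b2 = x.chunk(2, -1)             # 2 equal pieces
    assert torch.equal(a1, a2) and torch.equal(b1, b2)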
@ -22,7 +22,7 @@ class ClassificationPredictor(BasePredictor):
        results = []
        for i, pred in enumerate(preds):
            orig_img = orig_img[i] if isinstance(orig_img, list) else orig_img
            results.append(Results(probs=pred.softmax(0), orig_img=orig_img, names=self.model.names))
            results.append(Results(probs=pred, orig_img=orig_img, names=self.model.names))

        return results

@ -30,6 +30,9 @@ class ClassificationValidator(BaseValidator):
        self.pred.append(preds.argsort(1, descending=True)[:, :5])
        self.targets.append(batch['cls'])

    def finalize_metrics(self, *args, **kwargs):
        self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))

    def get_stats(self):
        self.metrics.process(self.targets, self.pred)
        return self.metrics.results_dict
@ -111,6 +111,9 @@ class DetectionValidator(BaseValidator):
        # if self.args.save_txt:
        #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

    def finalize_metrics(self, *args, **kwargs):
        self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))

    def get_stats(self):
        stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)]  # to numpy
        if len(stats) and stats[0].any():
@ -1,6 +1,5 @@
# Ultralytics YOLO 🚀, GPL-3.0 license

import os
from multiprocessing.pool import ThreadPool
from pathlib import Path

@ -10,7 +9,7 @@ import torch.nn.functional as F

from ultralytics.yolo.utils import DEFAULT_CFG, NUM_THREADS, ops
from ultralytics.yolo.utils.checks import check_requirements
from ultralytics.yolo.utils.metrics import ConfusionMatrix, SegmentMetrics, box_iou, mask_iou
from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou
from ultralytics.yolo.utils.plotting import output_to_target, plot_images
from ultralytics.yolo.v8.detect import DetectionValidator

@ -120,6 +119,9 @@ class SegmentationValidator(DetectionValidator):
        # if self.args.save_txt:
        #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

    def finalize_metrics(self, *args, **kwargs):
        self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))

    def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False):
        """
        Return correct prediction matrix