@@ -60,7 +60,7 @@
 "base_uri": "https://localhost:8080/",
 "height": 1000
 },
-"outputId": "19bbf989-d9fa-419d-8948-aaba39db8ddb"
+"outputId": "9012b4cd-53eb-4c84-f5b7-4976d4b4e58a"
 },
 "source": [
 "# Pip install method (recommended)\n",
@@ -74,7 +74,7 @@
 "output_type": "stream",
 "name": "stderr",
 "text": [
-"Ultralytics YOLOv8.0.4 🚀 Python-3.8.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
+"Ultralytics YOLOv8.0.5 🚀 Python-3.8.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
 "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 23.0/166.8 GB disk)\n"
 ]
 }
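For reference, the setup cell behind the stderr above boils down to installing the package and running its environment check. A minimal sketch, assuming the pip install method named in the cell comment (`ultralytics.checks()` prints the version/torch/CUDA banner and the "Setup complete" line):

    # Setup sketch (assumption: the pip-installed ultralytics package)
    %pip install ultralytics  # Jupyter magic; in a shell use: pip install ultralytics
    import ultralytics
    ultralytics.checks()  # prints the Ultralytics/torch/CUDA banner seen in the output above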
@@ -111,28 +111,28 @@
 "colab": {
 "base_uri": "https://localhost:8080/"
 },
-"outputId": "bc3ee5db-5c36-4dcc-d016-d6b93c756eb2"
+"outputId": "3136de6b-2995-4731-e84c-962acb233d89"
 },
 "source": [
 "# Run inference on an image with YOLOv8n\n",
 "!yolo task=detect mode=predict model=yolov8n.pt conf=0.25 source='https://ultralytics.com/images/zidane.jpg'"
 ],
-"execution_count": null,
+"execution_count": 2,
 "outputs": [
 {
 "output_type": "stream",
 "name": "stdout",
 "text": [
 "Downloading https://ultralytics.com/images/zidane.jpg to zidane.jpg...\n",
-"100% 165k/165k [00:00<00:00, 8.97MB/s]\n",
-"Ultralytics YOLOv8.0.1 🚀 Python-3.8.16 torch-1.13.0+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
+"100% 165k/165k [00:00<00:00, 12.0MB/s]\n",
+"Ultralytics YOLOv8.0.5 🚀 Python-3.8.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
 "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt to yolov8n.pt...\n",
-"100% 6.24M/6.24M [00:01<00:00, 6.32MB/s]\n",
+"100% 6.24M/6.24M [00:00<00:00, 58.7MB/s]\n",
 "\n",
 "Fusing layers... \n",
 "YOLOv8n summary: 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",
 "image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 13.6ms\n",
-"Speed: 0.4ms pre-process, 13.6ms inference, 51.9ms postprocess per image at shape (1, 3, 640, 640)\n",
+"Speed: 0.4ms pre-process, 13.6ms inference, 52.1ms postprocess per image at shape (1, 3, 640, 640)\n",
 "Results saved to \u001b[1mruns/detect/predict\u001b[0m\n"
 ]
 }
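The `yolo` CLI call in this hunk has a one-to-one Python counterpart. A minimal sketch, assuming the same pip-installed `ultralytics` package (keyword names mirror the CLI flags; the checkpoint auto-downloads on first use, as in the log above):

    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')  # weights download on first use, mirroring the CLI log
    results = model.predict(source='https://ultralytics.com/images/zidane.jpg', conf=0.25)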
@@ -175,111 +175,14 @@
 {
 "cell_type": "code",
 "metadata": {
-"id": "X58w8JLpMnjH",
-"colab": {
-"base_uri": "https://localhost:8080/"
-},
-"outputId": "ec81409c-7f16-44ec-ac70-8c09021e25a1"
+"id": "X58w8JLpMnjH"
 },
 "source": [
 "# Validate YOLOv8n on COCO128 val\n",
 "!yolo task=detect mode=val model=yolov8n.pt data=coco128.yaml"
 ],
 "execution_count": null,
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": [
-"Ultralytics YOLOv8.0.1 🚀 Python-3.8.16 torch-1.13.0+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
-"Fusing layers... \n",
-"YOLOv8n summary: 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",
-"\n",
-"Dataset not found ⚠️, missing paths ['/datasets/coco128/images/train2017']\n",
-"Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n",
-"100% 6.66M/6.66M [00:01<00:00, 6.22MB/s]\n",
-"Dataset download success ✅ (1.9s), saved to \u001b[1m/datasets\u001b[0m\n",
-"Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n",
-"100% 755k/755k [00:00<00:00, 27.8MB/s]\n",
-"\u001b[34m\u001b[1mval: \u001b[0mScanning /datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1327.78it/s]\n",
-"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /datasets/coco128/labels/train2017.cache\n",
-"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 8/8 [00:04<00:00, 1.74it/s]\n",
-"                   all        128        929       0.64      0.537      0.605      0.446\n",
-"                person        128        254      0.797      0.677      0.764      0.538\n",
-"               bicycle        128          6      0.514      0.333      0.315      0.264\n",
-"                   car        128         46      0.813      0.217      0.273      0.168\n",
-"            motorcycle        128          5      0.687      0.887      0.898      0.685\n",
-"              airplane        128          6       0.82      0.833      0.927      0.675\n",
-"                   bus        128          7      0.491      0.714      0.728      0.671\n",
-"                 train        128          3      0.534      0.667      0.706      0.604\n",
-"                 truck        128         12          1      0.332      0.473      0.297\n",
-"                  boat        128          6      0.226      0.167      0.316      0.134\n",
-"         traffic light        128         14      0.734        0.2      0.202      0.139\n",
-"             stop sign        128          2          1      0.992      0.995      0.701\n",
-"                 bench        128          9      0.839      0.582       0.62      0.365\n",
-"                  bird        128         16      0.921      0.728      0.864       0.51\n",
-"                   cat        128          4      0.875          1      0.995      0.791\n",
-"                   dog        128          9      0.603      0.889      0.785      0.585\n",
-"                 horse        128          2      0.597          1      0.995      0.518\n",
-"              elephant        128         17      0.849      0.765        0.9      0.679\n",
-"                  bear        128          1      0.593          1      0.995      0.995\n",
-"                 zebra        128          4      0.848          1      0.995      0.965\n",
-"               giraffe        128          9       0.72          1      0.951      0.722\n",
-"              backpack        128          6      0.589      0.333      0.376      0.232\n",
-"              umbrella        128         18      0.804        0.5      0.643      0.414\n",
-"               handbag        128         19      0.424     0.0526      0.165     0.0889\n",
-"                   tie        128          7      0.804      0.714      0.674      0.476\n",
-"              suitcase        128          4      0.635      0.883      0.745      0.534\n",
-"               frisbee        128          5      0.675        0.8      0.759      0.688\n",
-"                  skis        128          1      0.567          1      0.995      0.497\n",
-"             snowboard        128          7      0.742      0.714      0.747        0.5\n",
-"           sports ball        128          6      0.716      0.433      0.485      0.278\n",
-"                  kite        128         10      0.817       0.45      0.569      0.184\n",
-"          baseball bat        128          4      0.551       0.25      0.353      0.175\n",
-"        baseball glove        128          7      0.624      0.429      0.429      0.293\n",
-"            skateboard        128          5      0.846        0.6        0.6       0.41\n",
-"         tennis racket        128          7      0.726      0.387      0.487       0.33\n",
-"                bottle        128         18      0.448      0.389      0.376      0.208\n",
-"            wine glass        128         16      0.743      0.362      0.584      0.333\n",
-"                   cup        128         36       0.58      0.278      0.404       0.29\n",
-"                  fork        128          6      0.527      0.167      0.246      0.184\n",
-"                 knife        128         16      0.564        0.5       0.59       0.36\n",
-"                 spoon        128         22      0.597      0.182      0.328       0.19\n",
-"                  bowl        128         28      0.648      0.643      0.618      0.491\n",
-"                banana        128          1          0          0      0.124     0.0379\n",
-"              sandwich        128          2      0.249        0.5      0.308      0.308\n",
-"                orange        128          4          1       0.31      0.995      0.623\n",
-"              broccoli        128         11      0.374      0.182      0.249      0.203\n",
-"                carrot        128         24      0.648      0.458      0.572      0.362\n",
-"               hot dog        128          2      0.351      0.553      0.745      0.721\n",
-"                 pizza        128          5      0.644          1      0.995      0.843\n",
-"                 donut        128         14      0.657          1       0.94      0.864\n",
-"                  cake        128          4      0.618          1      0.945      0.845\n",
-"                 chair        128         35      0.506      0.514      0.442      0.239\n",
-"                 couch        128          6      0.463        0.5      0.706      0.555\n",
-"          potted plant        128         14       0.65      0.643      0.711      0.472\n",
-"                   bed        128          3      0.698      0.667      0.789      0.625\n",
-"          dining table        128         13      0.432      0.615      0.485      0.366\n",
-"                toilet        128          2      0.615        0.5      0.695      0.676\n",
-"                    tv        128          2      0.373       0.62      0.745      0.696\n",
-"                laptop        128          3          1          0      0.451      0.361\n",
-"                 mouse        128          2          1          0     0.0625    0.00625\n",
-"                remote        128          8      0.843        0.5      0.605      0.529\n",
-"            cell phone        128          8          0          0     0.0549     0.0393\n",
-"             microwave        128          3      0.435      0.667      0.806      0.718\n",
-"                  oven        128          5      0.412        0.4      0.339       0.27\n",
-"                  sink        128          6       0.35      0.167      0.182      0.129\n",
-"          refrigerator        128          5      0.589        0.4      0.604      0.452\n",
-"                  book        128         29      0.629      0.103      0.346      0.178\n",
-"                 clock        128          9      0.788       0.83      0.875       0.74\n",
-"                  vase        128          2      0.376          1      0.828      0.795\n",
-"              scissors        128          1          1          0      0.249     0.0746\n",
-"            teddy bear        128         21      0.877      0.333      0.591      0.394\n",
-"            toothbrush        128          5      0.743        0.6      0.638      0.374\n",
-"Speed: 0.9ms pre-process, 5.5ms inference, 0.0ms loss, 2.4ms post-process per image\n"
-]
-}
-]
+"outputs": []
 },
 {
 "cell_type": "markdown",
@@ -291,169 +194,20 @@
 "\n",
 "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\"/></a></p>\n",
 "\n",
-"Train YOLOv8 on [Detection](https://docs.ultralytics.com/tasks/detection/), [Segmentation](https://docs.ultralytics.com/tasks/detection/) and [Classification](https://docs.ultralytics.com/tasks/detection/) datasets."
+"Train YOLOv8 on [Detection](https://docs.ultralytics.com/tasks/detection/), [Segmentation](https://docs.ultralytics.com/tasks/segmentation/) and [Classification](https://docs.ultralytics.com/tasks/classification/) datasets."
 ]
 },
 {
 "cell_type": "code",
 "metadata": {
-"id": "1NcFxRcFdJ_O",
-"colab": {
-"base_uri": "https://localhost:8080/"
-},
-"outputId": "0f87de5c-da4e-4290-ee64-2de4d4d7cd8e"
+"id": "1NcFxRcFdJ_O"
 },
 "source": [
 "# Train YOLOv8n on COCO128 for 3 epochs\n",
 "!yolo task=detect mode=train model=yolov8n.pt data=coco128.yaml epochs=3 imgsz=640"
 ],
 "execution_count": null,
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": [
-"\u001b[34m\u001b[1myolo/engine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=False, optimizer=SGD, verbose=False, seed=0, deterministic=True, single_cls=False, image_weights=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, hide_labels=False, hide_conf=False, vid_stride=1, line_thickness=3, visualize=False, augment=False, agnostic_nms=False, retina_masks=False, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=17, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, fl_gamma=0.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, hydra={'output_subdir': None, 'run': {'dir': '.'}}, v5loader=False, save_dir=runs/detect/train\n",
-"Ultralytics YOLOv8.0.1 🚀 Python-3.8.16 torch-1.13.0+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
-"\n",
-"                   from  n    params  module                                       arguments                     \n",
-"  0                  -1  1       464  ultralytics.nn.modules.Conv                  [3, 16, 3, 2]                 \n",
-"  1                  -1  1      4672  ultralytics.nn.modules.Conv                  [16, 32, 3, 2]                \n",
-"  2                  -1  1      7360  ultralytics.nn.modules.C2f                   [32, 32, 1, True]             \n",
-"  3                  -1  1     18560  ultralytics.nn.modules.Conv                  [32, 64, 3, 2]                \n",
-"  4                  -1  2     49664  ultralytics.nn.modules.C2f                   [64, 64, 2, True]             \n",
-"  5                  -1  1     73984  ultralytics.nn.modules.Conv                  [64, 128, 3, 2]               \n",
-"  6                  -1  2    197632  ultralytics.nn.modules.C2f                   [128, 128, 2, True]           \n",
-"  7                  -1  1    295424  ultralytics.nn.modules.Conv                  [128, 256, 3, 2]              \n",
-"  8                  -1  1    460288  ultralytics.nn.modules.C2f                   [256, 256, 1, True]           \n",
-"  9                  -1  1    164608  ultralytics.nn.modules.SPPF                  [256, 256, 5]                 \n",
-" 10                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']          \n",
-" 11             [-1, 6]  1         0  ultralytics.nn.modules.Concat                [1]                           \n",
-" 12                  -1  1    148224  ultralytics.nn.modules.C2f                   [384, 128, 1]                 \n",
-" 13                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']          \n",
-" 14             [-1, 4]  1         0  ultralytics.nn.modules.Concat                [1]                           \n",
-" 15                  -1  1     37248  ultralytics.nn.modules.C2f                   [192, 64, 1]                  \n",
-" 16                  -1  1     36992  ultralytics.nn.modules.Conv                  [64, 64, 3, 2]                \n",
-" 17            [-1, 12]  1         0  ultralytics.nn.modules.Concat                [1]                           \n",
-" 18                  -1  1    123648  ultralytics.nn.modules.C2f                   [192, 128, 1]                 \n",
-" 19                  -1  1    147712  ultralytics.nn.modules.Conv                  [128, 128, 3, 2]              \n",
-" 20             [-1, 9]  1         0  ultralytics.nn.modules.Concat                [1]                           \n",
-" 21                  -1  1    493056  ultralytics.nn.modules.C2f                   [384, 256, 1]                 \n",
-" 22        [15, 18, 21]  1    897664  ultralytics.nn.modules.Detect                [80, [64, 128, 256]]          \n",
-"Model summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs\n",
-"\n",
-"Transferred 355/355 items from pretrained weights\n",
-"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias\n",
-"\u001b[34m\u001b[1mtrain: \u001b[0mScanning /datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
-"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
-"\u001b[34m\u001b[1mval: \u001b[0mScanning /datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
-"Image sizes 640 train, 640 val\n",
-"Using 2 dataloader workers\n",
-"Logging results to \u001b[1mruns/detect/train\u001b[0m\n",
-"Starting training for 3 epochs...\n",
-"\n",
-"      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n",
-"        1/3      4.31G      1.221      1.429      1.241        196        640: 100% 8/8 [00:08<00:00, 1.06s/it]\n",
-"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 4/4 [00:01<00:00, 2.18it/s]\n",
-"                   all        128        929      0.671      0.516      0.617      0.457\n",
-"\n",
-"      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n",
-"        2/3      5.31G      1.186      1.306      1.255        287        640: 100% 8/8 [00:05<00:00, 1.57it/s]\n",
-"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 4/4 [00:01<00:00, 2.23it/s]\n",
-"                   all        128        929      0.668      0.582      0.637      0.473\n",
-"\n",
-"      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n",
-"        3/3      5.31G       1.17      1.408      1.267        189        640: 100% 8/8 [00:04<00:00, 1.62it/s]\n",
-"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 4/4 [00:03<00:00, 1.01it/s]\n",
-"                   all        128        929      0.638      0.601      0.645      0.483\n",
-"\n",
-"3 epochs completed in 0.010 hours.\n",
-"Optimizer stripped from runs/detect/train/weights/last.pt, 6.5MB\n",
-"Optimizer stripped from runs/detect/train/weights/best.pt, 6.5MB\n",
-"\n",
-"Validating runs/detect/train/weights/best.pt...\n",
-"Ultralytics YOLOv8.0.1 🚀 Python-3.8.16 torch-1.13.0+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
-"Fusing layers... \n",
-"Model summary: 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",
-"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 4/4 [00:04<00:00, 1.20s/it]\n",
-"                   all        128        929      0.638      0.602      0.644      0.483\n",
-"                person        128        254      0.703      0.709      0.769      0.548\n",
-"               bicycle        128          6      0.455      0.333      0.322      0.254\n",
-"                   car        128         46      0.773      0.217      0.291      0.184\n",
-"            motorcycle        128          5      0.551        0.8      0.895      0.724\n",
-"              airplane        128          6      0.743      0.833      0.927       0.73\n",
-"                   bus        128          7      0.692      0.714        0.7      0.636\n",
-"                 train        128          3      0.733      0.931      0.913      0.797\n",
-"                 truck        128         12      0.752        0.5      0.497      0.324\n",
-"                  boat        128          6       0.41      0.333      0.492      0.344\n",
-"         traffic light        128         14      0.682      0.214      0.202      0.139\n",
-"             stop sign        128          2      0.933          1      0.995      0.671\n",
-"                 bench        128          9      0.752      0.556      0.603      0.416\n",
-"                  bird        128         16      0.875      0.876      0.957      0.641\n",
-"                   cat        128          4      0.863          1      0.995       0.76\n",
-"                   dog        128          9      0.554      0.778      0.855      0.664\n",
-"                 horse        128          2      0.706          1      0.995      0.561\n",
-"              elephant        128         17      0.761      0.882      0.929      0.722\n",
-"                  bear        128          1      0.595          1      0.995      0.995\n",
-"                 zebra        128          4       0.85          1      0.995      0.966\n",
-"               giraffe        128          9      0.891          1      0.995      0.683\n",
-"              backpack        128          6      0.487      0.333      0.354      0.224\n",
-"              umbrella        128         18       0.54      0.667      0.687      0.461\n",
-"               handbag        128         19      0.496      0.105      0.212      0.125\n",
-"                   tie        128          7      0.611      0.714      0.615      0.432\n",
-"              suitcase        128          4      0.469          1      0.745      0.529\n",
-"               frisbee        128          5      0.622        0.8      0.733       0.64\n",
-"                  skis        128          1      0.721          1      0.995      0.531\n",
-"             snowboard        128          7      0.687      0.714      0.751       0.51\n",
-"           sports ball        128          6       0.71       0.42      0.503      0.282\n",
-"                  kite        128         10       0.81        0.5       0.59      0.197\n",
-"          baseball bat        128          4      0.474      0.461      0.261      0.115\n",
-"        baseball glove        128          7       0.67      0.429       0.43      0.317\n",
-"            skateboard        128          5      0.751        0.6      0.599      0.387\n",
-"         tennis racket        128          7      0.742      0.415      0.507      0.378\n",
-"                bottle        128         18      0.409      0.333      0.354      0.235\n",
-"            wine glass        128         16      0.562        0.5      0.597      0.356\n",
-"                   cup        128         36       0.67      0.306      0.411      0.296\n",
-"                  fork        128          6       0.57      0.167      0.229      0.203\n",
-"                 knife        128         16      0.608      0.562      0.634      0.405\n",
-"                 spoon        128         22      0.529      0.358      0.369      0.201\n",
-"                  bowl        128         28      0.594      0.679      0.671       0.56\n",
-"                banana        128          1     0.0625      0.312      0.199     0.0513\n",
-"              sandwich        128          2      0.638      0.913      0.828      0.828\n",
-"                orange        128          4      0.743      0.728      0.895      0.595\n",
-"              broccoli        128         11       0.49      0.264      0.278      0.232\n",
-"                carrot        128         24      0.547      0.667      0.704       0.47\n",
-"               hot dog        128          2      0.578          1      0.828      0.796\n",
-"                 pizza        128          5      0.835          1      0.995       0.84\n",
-"                 donut        128         14      0.537          1      0.891      0.788\n",
-"                  cake        128          4      0.807          1      0.995      0.904\n",
-"                 chair        128         35      0.401      0.514      0.485      0.277\n",
-"                 couch        128          6      0.795      0.649      0.746      0.504\n",
-"          potted plant        128         14      0.563      0.643      0.676      0.471\n",
-"                   bed        128          3      0.777          1      0.995      0.735\n",
-"          dining table        128         13      0.425      0.692      0.578       0.48\n",
-"                toilet        128          2      0.508        0.5      0.745      0.721\n",
-"                    tv        128          2       0.55      0.649      0.828      0.762\n",
-"                laptop        128          3          1          0      0.741      0.653\n",
-"                 mouse        128          2          1          0     0.0454    0.00907\n",
-"                remote        128          8       0.83        0.5      0.569      0.449\n",
-"            cell phone        128          8          0          0     0.0819     0.0266\n",
-"             microwave        128          3      0.475      0.667       0.83      0.699\n",
-"                  oven        128          5        0.5        0.4      0.348      0.275\n",
-"                  sink        128          6      0.354      0.187      0.368      0.217\n",
-"          refrigerator        128          5      0.518        0.4      0.729      0.571\n",
-"                  book        128         29      0.583      0.241      0.396      0.204\n",
-"                 clock        128          9      0.891      0.889       0.91      0.773\n",
-"                  vase        128          2      0.506          1      0.828      0.745\n",
-"              scissors        128          1          1          0      0.142     0.0426\n",
-"            teddy bear        128         21      0.587      0.476       0.63      0.458\n",
-"            toothbrush        128          5      0.784      0.736      0.898      0.544\n",
-"Speed: 0.4ms pre-process, 4.7ms inference, 0.0ms loss, 3.3ms post-process per image\n",
-"Saving runs/detect/train/predictions.json...\n",
-"Results saved to \u001b[1mruns/detect/train\u001b[0m\n"
-]
-}
-]
+"outputs": []
 },
 {
 "cell_type": "markdown",
@@ -462,6 +216,9 @@
 "\n",
 "Export a YOLOv8 model to any supported format with the `format` argument, i.e. `format=onnx`.\n",
 "\n",
+"- 💡 ProTip: Export to ONNX or OpenVINO for up to 3x CPU speedup. \n",
+"- 💡 ProTip: Export to TensorRT for up to 5x GPU speedup.\n",
 "\n",
+"\n",
 "| Format | `format=` | Model |\n",
 "|----------------------------------------------------------------------------|--------------------|---------------------------|\n",
@@ -524,7 +281,7 @@
 "source": [
 "# 5. Python Usage\n",
 "\n",
-"YOLOv8 was reimagined using Python-first principles for the most seamless Python YOLO experience yet. YOLOv8 models can be loaded from a trained checkpoint or created from scratch. Then methods are used to train, val, predict, and export the model. See a detailed Python usage examples in the YOLOv8 [Docs](https://docs.ultralytics.com)."
+"YOLOv8 was reimagined using Python-first principles for the most seamless Python YOLO experience yet. YOLOv8 models can be loaded from a trained checkpoint or created from scratch. Then methods are used to train, val, predict, and export the model. See detailed Python usage examples in the YOLOv8 [Docs](https://docs.ultralytics.com/python/)."
 ],
 "metadata": {
 "id": "kUMOQ0OeDBJG"
@@ -578,12 +335,12 @@
 {
 "cell_type": "code",
 "source": [
-"# Load a pretraind YOLOv8n detection model, train it on COCO128 for 3 epochs and predict an image with it\n",
+"# Load YOLOv8n, train it on COCO128 for 3 epochs and predict an image with it\n",
 "from ultralytics import YOLO\n",
 "\n",
 "model = YOLO('yolov8n.pt') # load a pretrained YOLOv8n detection model\n",
 "model.train(data='coco128.yaml', epochs=3) # train the model\n",
-"model.predict(source='https://ultralytics.com/images/bus.jpg') # predict on an image"
+"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
 ],
 "metadata": {
 "id": "8Go5qqS9LbC5"
@@ -605,12 +362,12 @@
 {
 "cell_type": "code",
 "source": [
-"# Load a pretraind YOLOv8n segmentation model, train it on COCO128-seg for 3 epochs and predict an image with it\n",
+"# Load YOLOv8n-seg, train it on COCO128-seg for 3 epochs and predict an image with it\n",
 "from ultralytics import YOLO\n",
 "\n",
 "model = YOLO('yolov8n-seg.pt') # load a pretrained YOLOv8n segmentation model\n",
 "model.train(data='coco128-seg.yaml', epochs=3) # train the model\n",
-"model.predict(source='https://ultralytics.com/images/bus.jpg') # predict on an image"
+"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
 ],
 "metadata": {
 "id": "WFPJIQl_L5HT"
@@ -623,7 +380,7 @@
 "source": [
 "## 3. Classification\n",
 "\n",
-"YOLOv8 _classification_ models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on ImageNet. See [Classification Docs](https://docs.ultralytics.com/tasks/detection/) for full details.\n"
+"YOLOv8 _classification_ models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on ImageNet. See [Classification Docs](https://docs.ultralytics.com/tasks/classification/) for full details.\n"
 ],
 "metadata": {
 "id": "ax3p94VNK9zR"
@@ -632,12 +389,12 @@
 {
 "cell_type": "code",
 "source": [
-"# Load a pretraind YOLOv8n classification model, train it on imagenette160 for 3 epochs and predict an image with it\n",
+"# Load YOLOv8n-cls, train it on imagenette160 for 3 epochs and predict an image with it\n",
 "from ultralytics import YOLO\n",
 "\n",
 "model = YOLO('yolov8n-cls.pt') # load a pretrained YOLOv8n classification model\n",
 "model.train(data='imagenette160', epochs=3) # train the model\n",
-"model.predict(source='https://ultralytics.com/images/bus.jpg') # predict on an image"
+"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
 ],
 "metadata": {
 "id": "5q9Zu6zlL5rS"