from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.yaml') # build a new model from YAML
model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training)
model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights
# Train the model
results = model.train(data='coco128.yaml', epochs=100, imgsz=640)
mAP50(B) stands for "Mean Average Precision at IoU 0.50" for bounding boxes: the average precision (AP) is computed per class at an IoU (Intersection over Union) threshold of 0.50, then averaged over all classes. The (B) suffix marks this as a box metric; segmentation models report a corresponding (M) mask metric.
mAP50-95(B)
mAP50-95(B) stands for "Mean Average Precision across IoU 0.50 to 0.95" for bounding boxes: the AP is averaged over IoU thresholds from 0.50 to 0.95 (in steps of 0.05) and over all classes. This metric evaluates the model's performance more comprehensively across different overlap requirements.
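Both values can be read directly off the validation results; a minimal sketch, assuming the standard Ultralytics DetMetrics attributes:
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
metrics = model.val(data='coco128.yaml')  # run validation on the dataset's val split
print(metrics.box.map50)  # mAP at IoU 0.50, i.e. mAP50(B)
print(metrics.box.map)    # mAP averaged over IoU 0.50-0.95, i.e. mAP50-95(B)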
from ultralytics import YOLO
# Load a model
model = YOLO("yolov8n.yaml") # build a new model from scratch
model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
# Use the model
model.train(data="coco128.yaml", epochs=3) # train the model
metrics = model.val() # evaluate model performance on the validation set
results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
path = model.export(format="onnx") # export the model to ONNX format
from ultralytics import YOLO
# Create a new YOLO model from scratch
model = YOLO('yolov8n.yaml')
# Load a pretrained YOLO model (recommended for training)
model = YOLO('yolov8n.pt')
# Train the model using the 'coco128.yaml' dataset for 3 epochs
results = model.train(data='coco128.yaml', epochs=3)
# Evaluate the model's performance on the validation set
results = model.val()
# Perform object detection on an image using the model
results = model('https://ultralytics.com/images/bus.jpg')
# Export the model to ONNX format
success = model.export(format='onnx')
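To consume the detections programmatically, each Results object returned by the prediction call exposes a boxes attribute; a minimal sketch, assuming the usual Ultralytics Results API:
# Inspect the detections from the prediction above (one Results object per image)
for r in results:
    for box in r.boxes:
        print(box.xyxy)  # corner coordinates (x1, y1, x2, y2)
        print(box.conf)  # confidence score
        print(box.cls)   # class index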
git clone --recursive https://github.com/pytorch/pytorch
cd pytorch
# if you are updating an existing checkout
git submodule sync
git submodule update --init --recursive
conda install cmake ninja
# Run this command from the PyTorch directory after cloning the source code as shown above
pip install -r requirements.txt
# Add this package on Intel x86 processor machines only
conda install mkl mkl-include
# Add these packages if torch.distributed is needed
conda install pkg-config libuv
<!-- Load TensorFlow.js. This is required to use the coco-ssd model. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"> </script>
<!-- Load the coco-ssd model. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd"> </script>
<!-- Replace this with your image. Make sure CORS settings allow reading the image! -->
<img id="img" src="cat.jpg"/>
<!-- Place your code in the script tag below. You can also use an external .js file -->
<script>
// Notice there is no 'import' statement. 'cocoSsd' and 'tf' are
// available on the page because of the script tags above.
const img = document.getElementById('img');
// Load the model.
cocoSsd.load().then(model => {
// detect objects in the image.
model.detect(img).then(predictions => {
console.log('Predictions: ', predictions);
});
});
</script>
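Each element of the resolved predictions array is a plain object of the form { bbox: [x, y, width, height], class: 'person', score: 0.89 } (values shown are illustrative), with bbox measured in pixels on the input image.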
Reading from the camera
Below is a simple example that reads frames from the camera and runs object detection on them.
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Test</title>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"> </script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd"> </script>
</head>
<body>
<div class="select">
<label for="videoSource">Video source: </label><select id="videoSource"></select>
</div>
<button id="showVideo">Open camera</button>
<br />
<!-- Video element to capture camera input -->
<video id="video" autoplay playsinline style="position: absolute;z-index: -999;"></video><canvas id="canvas"
width="100%"></canvas>
<div id="message"></div>
<script>
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
document.querySelector('#showVideo').addEventListener('click', e => init(e));
const videoSelect = document.querySelector('select#videoSource');
function gotDevices(deviceInfos) {
console.log(deviceInfos)
deviceInfos.forEach(deviceInfo => {
if (deviceInfo.kind == "videoinput") {
const option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label || `camera ${videoSelect.length + 1}`;
videoSelect.appendChild(option);
}
});
}
function gotStream(stream) {
window.stream = stream; // make stream available to console
video.srcObject = stream;
// Refresh the device list in case labels have become available
return navigator.mediaDevices.enumerateDevices();
}
window.onload = () => {
// Request a stream once so device labels become available, then build the camera list
const constraints = { audio: false, video: true };
navigator.mediaDevices.getUserMedia(constraints).then(gotStream).then(gotDevices).catch(handleError);
}
function getRandomColor() {
const randomColor = Math.floor(Math.random() * 16777215).toString(16);
return "#" + ("000000" + randomColor).slice(-6);
}
function handleSuccess(stream) {
const videoTracks = stream.getVideoTracks();
let predictions = []; // latest detections, shared between the draw and detect loops
video.srcObject = stream;
// When the video is playing, draw it on the canvas
video.addEventListener('play', () => {
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
// Continuously draw the video frames on the canvas
function drawFrame() {
// Draw the video frame on the canvas
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const scaleX = canvas.width / video.videoWidth;
const scaleY = canvas.height / video.videoHeight;
// Draw the bounding boxes on the canvas
predictions.forEach(prediction => {
const x = prediction.bbox[0] * scaleX;
const y = prediction.bbox[1] * scaleY;
const width = prediction.bbox[2] * scaleX;
const height = prediction.bbox[3] * scaleY;
ctx.strokeStyle = 'blue';
ctx.lineWidth = 2;
ctx.strokeRect(x, y, width, height);
ctx.fillStyle = 'blue';
ctx.font = '18px Arial';
ctx.fillText(prediction.class, x, y);
});
// Call the drawFrame function again to continuously update the canvas
requestAnimationFrame(drawFrame);
}
// Start drawing video frames
drawFrame();
// Load the model.
cocoSsd.load().then(model => {
function detectFrame() {
// detect objects in the image.
model.detect(video).then(preds => {
predictions = preds; // store the latest detections for the draw loop
setTimeout(detectFrame, 100); // run detection again after a short pause
});
}
detectFrame();
});
});
}
function handleError(error) {
if (error.name === 'OverconstrainedError') {
alert('The requested camera constraints are not supported by your device.');
} else if (error.name === 'NotAllowedError') {
alert('Permissions have not been granted to use your camera, ' +
'you need to allow the page access to your devices in ' +
'order for the demo to work.');
} else {
alert(`getUserMedia error: ${error.name}`);
}
console.error(error);
}
async function init(e) {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: false, video: { deviceId: videoSelect.value ? { exact: videoSelect.value } : undefined } });
handleSuccess(stream);
e.target.disabled = true;
} catch (e) {
handleError(e);
}
}
</script>
</body>
</html>
Software-Defined Storage (SDS) is an approach to storage in which storage resources are controlled and managed in software, making storage systems more flexible, scalable, and easier to manage. Traditional storage systems typically rely on dedicated hardware appliances and controllers to manage data; SDS instead abstracts the storage-management functions into a software layer, so that storage resources can be allocated and managed on demand.