# YOLOv8 training entry point.
import multiprocessing
import os
from ultralytics import YOLO
# Force synchronous CUDA kernel launches so CUDA errors surface at the
# failing call site instead of asynchronously later (debugging aid).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
def my_function():
    """Train a YOLOv8 model on ./data.yaml, then validate it.

    Builds the architecture from yolov8.yaml, initialises it with the
    pretrained yolov8n.pt weights, trains for 300 epochs at 640 px,
    and finally runs validation against the same dataset file.
    """
    # NOTE: the original body was not indented under the def, which is a
    # SyntaxError in Python; the logic itself is unchanged.
    model = YOLO('yolov8.yaml').load("yolov8n.pt")
    # Train the model
    model.train(data='./data.yaml', epochs=300, imgsz=640)
    # Validate the freshly trained weights.
    model.val(data="./data.yaml")
if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable
    # (e.g. PyInstaller); a no-op everywhere else.
    multiprocessing.freeze_support()
    my_function()
# Webcam -> RTMP (FLV/H.264) push script using OpenCV for capture and
# PyAV for encoding/muxing.
import av
import asyncio
import cv2
import numpy as np
import threading
import time
from av import VideoFrame
from av.codec import CodecContext
# NOTE(review): asyncio, numpy and time are not referenced anywhere in
# this script section — confirm before removing.
# Define the RTMP server URL
rtmp_url = 'rtmp://127.0.0.1/live/test1' # Replace with your RTMP server URL
# Function to capture webcam frames and push to RTMP server
def capture_and_push():
    """Capture webcam frames and push them to rtmp_url as an H.264 FLV stream.

    Runs until the capture device stops delivering frames (or fails to
    open), then flushes the encoder and releases all resources. Blocks
    the calling thread for the duration of the stream.
    """
    # Open the default video capture device (webcam).
    cap = cv2.VideoCapture(0)
    # Output container muxing FLV over RTMP.
    output_container = av.open(rtmp_url, 'w', format='flv')
    video_stream = output_container.add_stream('h264', rate=30)
    video_stream.width = 640
    video_stream.height = 480
    # Fixes vs. the original:
    #  - removed the unused CodecContext.create(...) (add_stream already
    #    owns the encoder context),
    #  - removed the no-op self-assignments frame.pts = frame.pts /
    #    frame.time_base = frame.time_base,
    #  - removed the worker thread that was started and immediately
    #    joined (it added no concurrency),
    #  - added an encoder flush and try/finally cleanup.
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Wrap the BGR ndarray produced by OpenCV in a PyAV frame.
            video_frame = VideoFrame.from_ndarray(frame, format='bgr24')
            # encode() may emit zero or more packets per input frame.
            for packet in video_stream.encode(video_frame):
                output_container.mux(packet)
        # Drain packets still buffered inside the encoder.
        for packet in video_stream.encode(None):
            output_container.mux(packet)
    finally:
        cap.release()
        output_container.close()
if __name__ == "__main__":
    # Entry point: stream the webcam until the device stops delivering
    # frames or the process is interrupted.
    capture_and_push()
"""Relay one RTMP stream to another by remuxing its video packets.

Fixes vs. the original script:
 - ``import pyav`` removed: the PyAV package is importable only as
   ``av``; ``pyav`` does not exist, so the original failed at import.
 - ``output_container.recv()`` removed: containers expose no ``recv``
   method; the main loop raised immediately.
 - Packets are remuxed (copied) instead of decoded: the original
   decoded frames, tried to write the read-only ``frame.side_data``,
   and then muxed the *input* packet into the wrong stream anyway.
 - The output stream is created from the input stream's codec
   parameters so copied packets are valid for it.
"""
import av
import threading

# Define the source RTMP URL and destination RTMP URL
source_url = 'rtmp://127.0.0.1/live/test1'
destination_url = 'rtmp://127.0.0.1/live/test2'

# Create an input container for the source RTMP stream
input_container = av.open(source_url, mode='r')
# Create an output container for the destination RTMP stream
output_container = av.open(destination_url, mode='w')

# Mirror the source video stream so packets can be copied without
# re-encoding (stream copy).
in_video = input_container.streams.video[0]
out_video = output_container.add_stream(template=in_video)

# SEI payload the original intended to inject into each frame.
# NOTE(review): PyAV exposes no API for writing SEI NAL units; doing
# this properly requires bitstream-level insertion (e.g. an ffmpeg
# bitstream filter). Left unused until that is implemented.
sei_data = b'Some SEI Data'


def process_frames():
    """Copy video packets from the input container to the output."""
    for packet in input_container.demux(in_video):
        # Demux yields empty "flush" packets at end of stream; they
        # have no dts and must not be muxed.
        if packet.dts is None:
            continue
        # Retarget the packet at the mirrored output stream.
        packet.stream = out_video
        output_container.mux(packet)


# Run the relay on a worker thread; the main thread just waits so that
# Ctrl-C is handled cleanly.
frame_thread = threading.Thread(target=process_frames)
try:
    frame_thread.start()
    frame_thread.join()
except KeyboardInterrupt:
    pass
finally:
    # Clean up resources and close the containers
    input_container.close()
    output_container.close()