new camera handling concept

2025-09-18 20:14:12 +02:00
parent 2187536c7d
commit 6d616fd15e
7 changed files with 384 additions and 0 deletions

core/base.py Normal file (+18)

@@ -0,0 +1,18 @@
from PySide6.QtCore import QObject, Signal
from PySide6.QtGui import QPixmap


class BaseImageSource(QObject):
    """Common interface for objects that deliver live-view frames."""
    frameReady = Signal(QPixmap)
    errorOccurred = Signal(str)

    def start(self): ...
    def stop(self): ...


class BaseControlSource(QObject):
    """Common interface for objects that expose camera parameters."""
    errorOccurred = Signal(str)
    parameterChanged = Signal(str, object)

    def set_parameter(self, name: str, value): ...
    def get_parameter(self, name: str): ...
    def list_parameters(self) -> dict: ...

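For orientation, a minimal sketch of a concrete source built on these interfaces; the SolidColorSource name, the 640x480 size, and the 5 fps rate are assumptions for illustration, not part of the commit:

from PySide6.QtCore import QTimer
from PySide6.QtGui import QColor, QPixmap

from core.base import BaseImageSource


class SolidColorSource(BaseImageSource):
    """Hypothetical test source: emits a solid-colour pixmap at a fixed rate."""

    def __init__(self, fps=5, parent=None):
        super().__init__(parent)
        self.fps = fps
        self.timer = None

    def start(self):
        # QTimer keeps the source cooperative with the Qt event loop
        self.timer = QTimer(self)
        self.timer.timeout.connect(self._emit_frame)
        self.timer.start(int(1000 / self.fps))

    def stop(self):
        if self.timer:
            self.timer.stop()

    def _emit_frame(self):
        # QPixmap needs a running QGuiApplication, as in any Qt GUI code
        pixmap = QPixmap(640, 480)
        pixmap.fill(QColor(64, 64, 64))
        self.frameReady.emit(pixmap)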
camera/base_camera.py Normal file (+49)

@@ -0,0 +1,49 @@
from abc import ABC, abstractmethod


class BaseCamera(ABC):
    """Common interface for all camera backends."""

    @abstractmethod
    def connect(self) -> bool:
        """Establish a connection to the device."""
        raise NotImplementedError

    @abstractmethod
    def disconnect(self):
        """Close the connection to the device."""
        raise NotImplementedError

    @abstractmethod
    def start_stream(self):
        """Start the video stream."""
        raise NotImplementedError

    @abstractmethod
    def stop_stream(self):
        """Stop the video stream."""
        raise NotImplementedError

    @abstractmethod
    def get_frame(self):
        """Fetch a single live-view frame."""
        raise NotImplementedError

    @abstractmethod
    def capture_photo(self):
        """Take a photo."""
        raise NotImplementedError

    @abstractmethod
    def record_video(self):
        """Record a video."""
        raise NotImplementedError

    @abstractmethod
    def get_available_settings(self) -> dict:
        """Return a dict of available settings and their possible values."""
        raise NotImplementedError

    @abstractmethod
    def set_setting(self, name: str, value) -> bool:
        """Set the given setting to the chosen value."""
        raise NotImplementedError

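Because every backend implements BaseCamera, the rest of the application can stay backend-agnostic. A hedged sketch of a selection helper; the make_camera name and the camera.gphoto_backend module path are assumptions, while camera.opencv_camera follows the path noted in that file:

def make_camera(use_gphoto: bool) -> BaseCamera:
    """Hypothetical factory: the UI only ever talks to the BaseCamera interface."""
    if use_gphoto:
        from camera.gphoto_backend import GPhotoBackend  # module name is an assumption
        return GPhotoBackend()
    from camera.opencv_camera import OpenCVCamera
    return OpenCVCamera(camera_index=0)


camera = make_camera(use_gphoto=False)
if camera.connect():
    camera.start_stream()
    frame = camera.get_frame()  # QImage or None for the OpenCV backend
    camera.stop_stream()
    camera.disconnect()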

@@ -0,0 +1,35 @@
from .base_camera import BaseCamera
class GPhotoBackend(BaseCamera):
    """Placeholder gphoto2 backend; all methods are still stubs."""

    def __init__(self) -> None:
        self.camera = None
        self.context = None
        self._is_streaming = False

    def connect(self) -> bool:
        pass

    def disconnect(self):
        pass

    def start_stream(self):
        pass

    def stop_stream(self):
        pass

    def get_frame(self):
        pass

    def capture_photo(self):
        pass

    def record_video(self):
        pass

    def get_available_settings(self) -> dict:
        pass

    def set_setting(self, name: str, value) -> bool:
        pass

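The backend above is only a skeleton. A minimal sketch of how connect() and disconnect() could look once python-gphoto2 is wired in; the init()/exit() pair mirrors the mock elsewhere in this commit, and the error handling is an assumption rather than the final implementation:

import gphoto2 as gp  # assumption: python-gphoto2 is installed


class GPhotoBackend(BaseCamera):
    # ...other stubs unchanged...

    def connect(self) -> bool:
        try:
            self.camera = gp.Camera()
            self.camera.init()  # same init()/exit() pair the mock in this commit emulates
            return True
        except gp.GPhoto2Error:
            self.camera = None
            return False

    def disconnect(self):
        self.stop_stream()
        if self.camera is not None:
            self.camera.exit()
            self.camera = None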
camera/opencv_camera.py Normal file (+104)

@@ -0,0 +1,104 @@
# camera/opencv_camera.py
import cv2
import time
from PySide6.QtGui import QImage, QPixmap
from .base_camera import BaseCamera


class OpenCVCamera(BaseCamera):
    """Camera implementation based on OpenCV."""

    def __init__(self, camera_index=0):
        self.camera_index = camera_index
        self.video_capture = None
        self._is_streaming = False
        # self._live_view_thread = None  # internal thread for the live-view loop

    def connect(self) -> bool:
        self.video_capture = cv2.VideoCapture(self.camera_index)
        if not self.video_capture.isOpened():
            # self.error_occurred.emit(f"Cannot open OpenCV camera with index {self.camera_index}")
            self.video_capture = None
            return False
        # print("OpenCV camera connected.")
        return True

    def disconnect(self):
        self.stop_stream()
        if self.video_capture:
            self.video_capture.release()
            self.video_capture = None
        # print("OpenCV camera disconnected.")
        # self.camera_disconnected.emit()

    def start_stream(self):
        if not self.video_capture or not self.video_capture.isOpened():
            # self.error_occurred.emit("Attempted to start the preview on a camera that is not connected.")
            return
        if self._is_streaming:
            return  # already running
        self._is_streaming = True
        # The loop is driven from outside, because the whole object already lives in a dedicated thread
        # self._live_view_loop()

    def stop_stream(self):
        self._is_streaming = False

    def get_frame(self):
        if not self.video_capture:
            return None
        ret, frame = self.video_capture.read()
        if not ret:
            self.stop_stream()
            return None
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format.Format_RGB888)
        # copy() so the QImage owns its pixels after the local numpy buffer goes out of scope
        return qt_image.copy()

    def capture_photo(self, save_path: str):
        if not self.video_capture or not self.video_capture.isOpened():
            # self.error_occurred.emit("Cannot take a photo, the camera is not connected.")
            return
        ret, frame = self.video_capture.read()
        if ret:
            try:
                cv2.imwrite(save_path, frame)
                print(f"Photo saved to: {save_path}")
                # self.photo_captured.emit(save_path)
            except Exception as e:
                print(f"Error while saving photo: {e}")
                # self.error_occurred.emit(f"Error while saving photo: {e}")
        else:
            print("Failed to grab a frame for the photo.")
            # self.error_occurred.emit("Failed to grab a frame for the photo.")

    def record_video(self):
        # Required by BaseCamera; not implemented for the OpenCV backend yet
        pass

    def get_available_settings(self) -> dict:
        # This is a simplified implementation
        if not self.video_capture:
            return {}
        return {
            "brightness": self.video_capture.get(cv2.CAP_PROP_BRIGHTNESS),
            "contrast": self.video_capture.get(cv2.CAP_PROP_CONTRAST),
            "saturation": self.video_capture.get(cv2.CAP_PROP_SATURATION),
        }

    def set_setting(self, name: str, value) -> bool:
        if not self.video_capture:
            return False
        prop_map = {
            "brightness": cv2.CAP_PROP_BRIGHTNESS,
            "contrast": cv2.CAP_PROP_CONTRAST,
            "saturation": cv2.CAP_PROP_SATURATION,
        }
        if name in prop_map:
            return self.video_capture.set(prop_map[name], value)
        return False

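A short, hedged usage sketch of this backend outside the GUI; the device index, the brightness value, and the /tmp/test_shot.jpg path are placeholders, and accepted value ranges depend on the driver:

camera = OpenCVCamera(camera_index=0)
if camera.connect():
    print(camera.get_available_settings())       # driver-reported values
    camera.set_setting("brightness", 130)        # accepted range depends on the driver
    camera.capture_photo("/tmp/test_shot.jpg")   # hypothetical output path
    camera.disconnect()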
core/gphoto_adapter.py Normal file (+80)

@@ -0,0 +1,80 @@
from PySide6.QtCore import QObject, QThread, Signal, QTimer
from PySide6.QtGui import QImage, QPixmap
import cv2
import numpy as np
from .base import BaseControlSource, BaseImageSource
# try:
# import gphoto2 as gp
# except:
from . import mock_gphoto as gp
class GPhotoImageSource(BaseImageSource):
    def __init__(self, camera: gp.Camera, fps=10, parent=None):
        super().__init__(parent)
        self.camera = camera
        self.fps = fps
        self.timer = None

    def start(self):
        self.timer = QTimer()
        self.timer.timeout.connect(self._grab_frame)
        self.timer.start(int(1000 / self.fps))

    def _grab_frame(self):
        try:
            file = self.camera.capture_preview()
            data = file.get_data_and_size()
            frame = np.frombuffer(data, dtype=np.uint8)
            frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
            if frame is None:
                return
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            qimg = QImage(rgb_image.data, w, h, ch * w, QImage.Format.Format_RGB888)
            pixmap = QPixmap.fromImage(qimg)
            self.frameReady.emit(pixmap)
        except gp.GPhoto2Error as e:
            self.errorOccurred.emit(f"GPhoto2 error: {e}")

    def stop(self):
        if self.timer:
            self.timer.stop()


class GPhotoControlSource(BaseControlSource):
    def __init__(self, camera: gp.Camera, parent=None):
        super().__init__(parent)
        self.camera = camera

    def set_parameter(self, name, value):
        try:
            config = self.camera.get_config()
            child = config.get_child_by_name(name)
            child.set_value(value)
            self.camera.set_config(config)
            self.parameterChanged.emit(name, value)
        except gp.GPhoto2Error as e:
            self.errorOccurred.emit(str(e))

    def get_parameter(self, name):
        try:
            config = self.camera.get_config()
            child = config.get_child_by_name(name)
            return child.get_value()
        except gp.GPhoto2Error as e:
            self.errorOccurred.emit(str(e))
            return None

    def list_parameters(self):
        params = {}
        try:
            config = self.camera.get_config()
            for child in config.get_children():
                params[child.get_name()] = child.get_value()
        except gp.GPhoto2Error as e:
            self.errorOccurred.emit(str(e))
        return params

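Since the adapter currently falls back to core.mock_gphoto, a minimal wiring sketch that runs without hardware; the QLabel preview and the 10 fps value are assumptions for illustration, assuming core is importable as a package:

import sys
from PySide6.QtWidgets import QApplication, QLabel

from core import mock_gphoto as gp
from core.gphoto_adapter import GPhotoImageSource

app = QApplication(sys.argv)
label = QLabel("waiting for frames...")
label.show()

cam = gp.Camera()
cam.init()

source = GPhotoImageSource(cam, fps=10)
source.frameReady.connect(label.setPixmap)   # frameReady carries a QPixmap, so it plugs straight into QLabel
source.errorOccurred.connect(print)
source.start()

sys.exit(app.exec())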
core/mock_gphoto.py Normal file (+64)

@@ -0,0 +1,64 @@
import cv2
import numpy as np


class GPhoto2Error(Exception):
    pass


class CameraFileMock:
    """Mock of the object returned by gphoto2.Camera.capture_preview()."""

    def __init__(self, frame: np.ndarray):
        # Encode to JPEG to simulate the real data coming from a camera
        success, buf = cv2.imencode(".jpg", frame)
        if not success:
            raise GPhoto2Error("Failed to encode the test frame.")
        self._data = buf.tobytes()

    def get_data_and_size(self):
        # Return only the raw bytes; the adapter feeds this straight into np.frombuffer
        return self._data


class Camera:
    def __init__(self):
        self._frame_counter = 0
        self._running = False

    def init(self):
        self._running = True
        print("[mock_gphoto] MOCK camera initialized")

    def exit(self):
        self._running = False
        print("[mock_gphoto] MOCK camera shut down")

    def capture_preview(self):
        if not self._running:
            raise GPhoto2Error("The MOCK camera is not running")
        # option 1: load a fixed image from a file
        # frame = cv2.imread("test_frame.jpg")
        # if frame is None:
        #     raise GPhoto2Error("test_frame.jpg not found")
        # option 2: generate a coloured test pattern
        h, w = 480, 640
        color = (self._frame_counter % 255, 100, 200)
        frame = np.full((h, w, 3), color, dtype=np.uint8)
        # add a caption
        text = "TEST IMAGE"
        font = cv2.FONT_HERSHEY_SIMPLEX
        scale = 1.5
        thickness = 3
        color_text = (255, 255, 255)
        (text_w, text_h), _ = cv2.getTextSize(text, font, scale, thickness)
        x = (w - text_w) // 2
        y = (h + text_h) // 2
        cv2.putText(frame, text, (x, y), font, scale, color_text, thickness, cv2.LINE_AA)
        self._frame_counter += 1
        return CameraFileMock(frame)

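A quick, GUI-free way to check that the mock yields decodable JPEG previews; the printed shape reflects the 640x480 pattern generated above:

import cv2
import numpy as np

from core import mock_gphoto as gp

cam = gp.Camera()
cam.init()
data = cam.capture_preview().get_data_and_size()
frame = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
print(frame.shape)   # expected: (480, 640, 3)
cam.exit()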
core/opencv_adapter.py Normal file (+34)

@@ -0,0 +1,34 @@
import cv2

from .base import BaseControlSource


class OpenCVControlSource(BaseControlSource):
    def __init__(self, cap: cv2.VideoCapture, parent=None):
        super().__init__(parent)
        self.cap = cap

    def set_parameter(self, name, value):
        prop_id = getattr(cv2, name, None)
        if prop_id is None:
            self.errorOccurred.emit(f"Unknown parameter {name}")
            return
        self.cap.set(prop_id, value)
        self.parameterChanged.emit(name, value)

    def get_parameter(self, name):
        prop_id = getattr(cv2, name, None)
        if prop_id is None:
            self.errorOccurred.emit(f"Unknown parameter {name}")
            return None
        return self.cap.get(prop_id)

    def list_parameters(self):
        return {
            "CAP_PROP_BRIGHTNESS": self.cap.get(cv2.CAP_PROP_BRIGHTNESS),
            "CAP_PROP_CONTRAST": self.cap.get(cv2.CAP_PROP_CONTRAST),
            "CAP_PROP_SATURATION": self.cap.get(cv2.CAP_PROP_SATURATION),
            "CAP_PROP_GAIN": self.cap.get(cv2.CAP_PROP_GAIN),
            "CAP_PROP_EXPOSURE": self.cap.get(cv2.CAP_PROP_EXPOSURE),
        }
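
A hedged usage sketch for this adapter; the device index and the brightness value are placeholders, and properties are addressed by their cv2 constant names, as list_parameters() suggests:

import cv2

from core.opencv_adapter import OpenCVControlSource

cap = cv2.VideoCapture(0)
controls = OpenCVControlSource(cap)
controls.errorOccurred.connect(print)

print(controls.list_parameters())                    # current driver-reported values
controls.set_parameter("CAP_PROP_BRIGHTNESS", 0.5)   # accepted range depends on the driver
cap.release()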