CVBpy 14.1
polimago/QmlCookieClassification
import os, sys

import cvb
import cvb.ui
from result_model import ResultModel
from classification import Classification

if sys.version_info >= (3, 11):
    from PySide6.QtCore import QObject, QUrl, QAbstractListModel, Qt, QModelIndex
    from PySide6.QtQml import QQmlApplicationEngine, qmlRegisterType
    from PySide6.QtGui import QGuiApplication, QIcon
else:
    from PySide2.QtCore import QObject, QUrl, QAbstractListModel, Qt, QModelIndex
    from PySide2.QtQml import QQmlApplicationEngine, qmlRegisterType
    from PySide2.QtGui import QGuiApplication, QIcon


if __name__ == "__main__":

    app = QGuiApplication([])
    app.setOrganizationName('STEMMER IMAGING')
    app.setOrganizationDomain('https://www.stemmer-imaging.com/')
    app.setApplicationName('Polimago Python tutorial')

    # tell Windows the correct AppUserModelID for this process (shows icon in the taskbar)
    if sys.platform == 'win32':
        import ctypes
        myappid = u'stemmerimaging.commonvisionblox.pycookieclassification.0'
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

    app.setWindowIcon(QIcon('Tutorial-Python_32x32.png'))

    # load the device
    device = cvb.DeviceFactory.open(
        os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
        cvb.AcquisitionStack.Vin)

    # setup QML interface objects
    image_controller = cvb.ui.ImageController()
    result_model = ResultModel(image_controller)

    # main classification object
    classification = Classification(device, result_model)

    # register QML components for an image display
    cvb.ui.ImageViewItem.register()
    cvb.ui.ImageLabel.register()

    engine = QQmlApplicationEngine()
    context = engine.rootContext()

    # create a controller object to communicate with QML
    context.setContextProperty("mainImage", image_controller)
    # create a Polimago result model to communicate with QML for overlays
    context.setContextProperty("resultModel", result_model)
    # create the classification object to communicate with QML (snap, classify)
    context.setContextProperty("classification", classification)

    # load main QML file
    engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))

    # do a first snap at startup
    classification.snap()

    app.exec_()
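
For a quick check of the acquisition path used above, the device handling can also be exercised without the QML front end. A minimal sketch, using only calls that already appear in the listings on this page:

import os
import cvb

# open the CVMock driver exactly as the __main__ block above does
device = cvb.DeviceFactory.open(
    os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
    cvb.AcquisitionStack.Vin)
stream = device.stream()

# grab one frame with a 1000 ms timeout, as snap() does
image, wait_status = stream.get_timed_snapshot(1000)
if wait_status == cvb.WaitStatus.Ok:
    print("Snapped image with bounds:", image.bounds)

device.close()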

classification.py

import os, sys

import cvb
import cvb.polimago

if sys.version_info >= (3, 11):
    from PySide6 import QtCore
    from PySide6.QtCore import QObject, Qt, QUrl, Property, Signal, Slot
else:
    from PySide2 import QtCore
    from PySide2.QtCore import QObject, Qt, QUrl, Property, Signal, Slot

from result_model import ResultModel, PolimagoResult


# Global parameters for the Polimago grid search
GRID_STEP = 0.6
THRESHOLD = 0.0
LOCALITY = 1.0


class Classification(QtCore.QObject):

    def __init__(self, device, result_model):
        super().__init__()
        self._device = device
        self._stream = device.stream()
        self._model = result_model
        self._search = cvb.polimago.SearchPredictor(os.path.join(
            cvb.install_path(), "tutorial", "Polimago", "Images", "Cookies", "Cookies.psc"))
        self._classification = cvb.polimago.ClassificationPredictor(os.path.join(
            cvb.install_path(), "tutorial", "Polimago", "Images", "Cookies", "CookieType.pcc"))

        self._source = ""
        self._search_predictor = ""
        self._classification_predictor = ""

        self.notify_source.connect(self.source_changed)
        self.notify_search_predictor.connect(self.search_predictor_changed)
        self.notify_classification_predictor.connect(self.classification_predictor_changed)

    @Slot()
    def snap(self):
        # clear the result
        self._model.update([])
        self._image, wait_status = self._stream.get_timed_snapshot(1000)
        if wait_status != cvb.WaitStatus.Ok:
            return None
        # refresh image view
        self._model.refresh(self._image)

    @Slot()
    def classify(self):

        if self._search == 0 or self._classification == 0:
            sys.stderr.write('No search predictor or classification predictor was loaded.\n')
            return None

        # 1. find items
        search_res, calls = self._search.grid_search(
            self._image, self._image.bounds, GRID_STEP, THRESHOLD, LOCALITY)

        polimago_results = []

        for res in search_res:
            # only use results with quality >= 0.3
            if res.quality >= 0.3:
                # 2. classify the items
                pos = cvb.Point2D(res.x, res.y)
                clf_res, confidence_distribution = self._classification.classify(self._image, pos)
                polimago_results.append(PolimagoResult(pos, res.quality, clf_res))

        # 3. show result
        self._model.update(polimago_results)

    def source(self):
        return self._source

    def set_source(self, imageSource):
        self._source = imageSource
        self.notify_source.emit()

    def search_predictor(self):
        return self._search_predictor

    def set_search_predictor(self, searchPredictor):
        self._search_predictor = searchPredictor
        self.notify_search_predictor.emit()

    def classification_predictor(self):
        return self._classification_predictor

    def set_classification_predictor(self, classificationPredictor):
        self._classification_predictor = classificationPredictor
        self.notify_classification_predictor.emit()

    @Signal
    def notify_source(self):
        pass

    @Signal
    def notify_search_predictor(self):
        pass

    @Signal
    def notify_classification_predictor(self):
        pass

    @Slot()
    def source_changed(self):
        # clear the result
        self._model.update([])
        # clear predictors
        self._search = 0
        self._classification = 0
        # close current device
        self._device.close()
        # open new device / image
        path = self._source.toLocalFile()
        if path.endswith(".vin"):
            self._device = cvb.DeviceFactory.open(
                path, cvb.AcquisitionStack.Vin)
            self._stream = self._device.stream()
            self.snap()
        elif path.endswith(".bmp"):
            self._image = cvb.Image(path)
            self._model.refresh(self._image)

    @Slot()
    def search_predictor_changed(self):
        if not self.search_predictor().isEmpty():
            self._search = cvb.polimago.SearchPredictor(self.search_predictor().toLocalFile())

    @Slot()
    def classification_predictor_changed(self):
        if not self.classification_predictor().isEmpty():
            self._classification = cvb.polimago.ClassificationPredictor(self.classification_predictor().toLocalFile())

    imageSource = Property(QUrl, source, set_source)
    searchPredictor = Property(QUrl, search_predictor, set_search_predictor)
    classificationPredictor = Property(QUrl, classification_predictor, set_classification_predictor)
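
The two stages inside classify() (a Polimago grid search over the whole image, followed by per-position classification) can also be run headless for experiments. A minimal sketch reusing the predictor paths and parameters from the class above; the image file name is a hypothetical placeholder for any test image from the Cookies tutorial folder:

import os
import cvb
import cvb.polimago

GRID_STEP = 0.6
THRESHOLD = 0.0
LOCALITY = 1.0

cookie_dir = os.path.join(cvb.install_path(), "tutorial", "Polimago", "Images", "Cookies")
search = cvb.polimago.SearchPredictor(os.path.join(cookie_dir, "Cookies.psc"))
classifier = cvb.polimago.ClassificationPredictor(os.path.join(cookie_dir, "CookieType.pcc"))

image = cvb.Image(os.path.join(cookie_dir, "TestImage.bmp"))  # hypothetical file name

# 1. search candidate positions over the whole image
search_res, calls = search.grid_search(image, image.bounds, GRID_STEP, THRESHOLD, LOCALITY)

for res in search_res:
    if res.quality >= 0.3:
        # 2. classify every sufficiently good position
        clf_res, confidence_distribution = classifier.classify(image, cvb.Point2D(res.x, res.y))
        print(clf_res.name, clf_res.quality, res.x, res.y)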

result_model.py

import os, sys

import cvb
import cvb.ui

if sys.version_info >= (3, 11):
    from PySide6.QtCore import QObject, QAbstractListModel, Qt, QModelIndex, Property, Signal, Slot
else:
    from PySide2.QtCore import QObject, QAbstractListModel, Qt, QModelIndex, Property, Signal, Slot


class PolimagoResult(object):

    def __init__(self, pos, pos_quality, clf_res):
        self._pos = pos
        self._pos_quality = pos_quality
        self._name = clf_res.name
        self._color_quality = clf_res.quality

    def position(self):
        return self._pos

    def name(self):
        return self._name

    def position_quality(self):
        return self._pos_quality

    def color_quality(self):
        return self._color_quality

class ResultModel(QAbstractListModel):

    LineText = Qt.UserRole
    StartPosition = Qt.UserRole + 1
    Quality = Qt.UserRole + 2

    def __init__(self, image_controller, parent=None):
        super(ResultModel, self).__init__(parent)
        self._image_controller = image_controller
        self._results = []

    def update(self, polimago_results):
        self._results = polimago_results
        self.layoutChanged.emit()

    def roleNames(self):
        roles = dict()
        roles[ResultModel.LineText] = b"lineText"
        roles[ResultModel.StartPosition] = b"startPosition"
        roles[ResultModel.Quality] = b"quality"
        return roles

    def rowCount(self, parent=QModelIndex()):
        return len(self._results)

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None

        result = self._results[index.row()]

        if role == ResultModel.LineText:
            return result.name()
        elif role == ResultModel.StartPosition:
            return cvb.ui.cvb_to_qt_point(result.position())
        elif role == ResultModel.Quality:
            return result.position_quality()
        else:
            return None

    @Slot()
    def refresh(self, image):
        self._image_controller.refresh(image)

main.qml

import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick
import QtQuick.Controls 1.3
import QtQuick.Layouts 1.2
import QtQuick.Dialogs 1.2

ApplicationWindow
{
    id: rootWin
    visible: true
    property int margin: 11
    width: 1080
    height: 720

    ColumnLayout
    {
        id: mainLayout
        anchors.fill: parent
        anchors.margins: margin

        CvbQuick.ImageView
        {
            id: view
            image: mainImage
            Layout.fillWidth: true
            Layout.fillHeight: true

            Repeater
            {
                model: resultModel
                delegate: TextLabel
                {
                    imageView: view
                    imageX: startPosition.x
                    imageY: startPosition.y
                    posQuality: quality
                    text: lineText
                }
            }
        }

        RowLayout
        {
            Button
            {
                text: "Load Image"
                onClicked: openImageDialog.open()
            }

            Button
            {
                text: "Load Search Predictor"
                onClicked: loadSearchPredictorDialog.open()
            }

            Button
            {
                text: "Load Classification Predictor"
                onClicked: loadClassificationPredictorDialog.open()
            }

            Button
            {
                id: btnSnap
                text: "Snap"
                onClicked: classification.snap()
            }

            Button
            {
                text: "Classify"
                onClicked: classification.classify()
            }
        }

        FileDialog
        {
            id: openImageDialog
            title: "Open Image"
            selectExisting: true
            nameFilters: [ "Image source files (*.bmp)", "Common Vision Blox driver files (*.vin)" ]
            onAccepted:
            {
                classification.imageSource = openImageDialog.fileUrl;
                var path = classification.imageSource.toString();
                if (path.endsWith(".bmp"))
                    btnSnap.enabled = false
                else
                    btnSnap.enabled = true
            }
        }

        FileDialog
        {
            id: loadSearchPredictorDialog
            title: "Load Search Predictor"
            selectExisting: true
            nameFilters: [ "Search classifier (*.psc)" ]
            onAccepted: classification.searchPredictor = loadSearchPredictorDialog.fileUrl;
        }

        FileDialog
        {
            id: loadClassificationPredictorDialog
            title: "Load Classification Predictor"
            selectExisting: true
            nameFilters: [ "Classification predictor (*.pcc)" ]
            onAccepted: classification.classificationPredictor = loadClassificationPredictorDialog.fileUrl;
        }
    }
}

TextLabel.qml

import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick

// Output result as overlay
CvbQuick.ImageLabel
{
    id: label
    property var text: ""
    property var posQuality: 0.0

    Text
    {
        text: label.text + " (Q=" + String(label.posQuality.toFixed(2)) + ")"
        font.pointSize: 10
        font.bold: true
        color: "white"
        style: Text.Outline
        styleColor: "black"
    }
}