CVBpy 14.0
polimago/QmlCookieClassification
import os, sys

import cvb
import cvb.ui
from result_model import ResultModel
from classification import Classification

from PySide2.QtCore import QObject, QUrl, QAbstractListModel, Qt, QModelIndex
from PySide2.QtQml import QQmlApplicationEngine, qmlRegisterType
from PySide2.QtGui import QGuiApplication, QIcon


if __name__ == "__main__":

    app = QGuiApplication([])
    app.setOrganizationName('STEMMER IMAGING')
    app.setOrganizationDomain('https://www.stemmer-imaging.com/')
    app.setApplicationName('Polimago Python tutorial')

    # tell Windows the correct AppUserModelID for this process (shows icon in the taskbar)
    if sys.platform == 'win32':
        import ctypes
        myappid = u'stemmerimaging.commonvisionblox.pycookieclassification.0'
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

    app.setWindowIcon(QIcon('Tutorial-Python_32x32.png'))

    # load the device
    device = cvb.DeviceFactory.open(
        os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
        cvb.AcquisitionStack.Vin)

    # setup QML interface objects
    image_controller = cvb.ui.ImageController()
    result_model = ResultModel(image_controller)

    # main classification object
    classification = Classification(device, result_model)

    # register QML components for an image display
    cvb.ui.ImageView.register()
    cvb.ui.ImageLabel.register()

    engine = QQmlApplicationEngine()
    context = engine.rootContext()

    # make the image controller available to QML
    context.setContextProperty("mainImage", image_controller)
    # make the Polimago result model available to QML for overlays
    context.setContextProperty("resultModel", result_model)
    # make the classification object available to QML (snap, classify)
    context.setContextProperty("classification", classification)

    # load main QML file
    engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))

    # do a first snap at startup
    classification.snap()

    app.exec_()
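The UI above depends on the CVMock.vin driver that ships with Common Vision Blox. If the window stays empty, the acquisition path can be checked without any QML; the sketch below reuses only calls that already appear in the listings (DeviceFactory.open, stream(), get_timed_snapshot()), and printing the frame's width and height is simply an assumed way to confirm that an image arrived.

import os
import cvb

# open the mock driver and grab a single frame, no QML involved
device = cvb.DeviceFactory.open(
    os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
    cvb.AcquisitionStack.Vin)
stream = device.stream()

image, wait_status = stream.get_timed_snapshot(1000)
if wait_status == cvb.WaitStatus.Ok:
    print("Got a frame:", image.width, "x", image.height)
else:
    print("No frame within 1000 ms, wait status:", wait_status)

device.close()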
classification.py
import os, sys

import cvb
import cvb.polimago

from PySide2 import QtCore
from PySide2.QtCore import QObject, Qt, QUrl, Property, Signal, Slot

from result_model import ResultModel, PolimagoResult


# Global params
GRID_STEP = 0.6
THRESHOLD = 0.0
LOCALITY = 1.0


class Classification(QtCore.QObject):

    def __init__(self, device, result_model):
        super().__init__()
        self._device = device
        self._stream = device.stream()
        self._model = result_model
        self._search = cvb.polimago.SearchPredictor(
            os.path.join(cvb.install_path(), "tutorial", "Polimago", "Images", "Cookies", "Cookies.psc"))
        self._classification = cvb.polimago.ClassificationPredictor(
            os.path.join(cvb.install_path(), "tutorial", "Polimago", "Images", "Cookies", "CookieType.pcc"))

        self._source = ""
        self._search_predictor = ""
        self._classification_predictor = ""

        self.notify_source.connect(self.source_changed)
        self.notify_search_predictor.connect(self.search_predictor_changed)
        self.notify_classification_predictor.connect(self.classification_predictor_changed)

    @Slot()
    def snap(self):
        # clear the result
        self._model.update([])
        self._image, wait_status = self._stream.get_timed_snapshot(1000)
        if wait_status != cvb.WaitStatus.Ok:
            return None
        # refresh image view
        self._model.refresh(self._image)

    @Slot()
    def classify(self):

        if self._search == 0 or self._classification == 0:
            sys.stderr.write('No search predictor or classification predictor was loaded.\n')
            return None

        # 1. find items
        search_res, calls = self._search.grid_search(
            self._image, self._image.bounds, GRID_STEP, THRESHOLD, LOCALITY)

        polimago_results = []

        for res in search_res:
            # only use results with quality >= 0.3
            if res.quality >= 0.3:
                # 2. classify the items
                pos = cvb.Point2D(res.x, res.y)
                clf_res, confidence_distribution = self._classification.classify(self._image, pos)
                polimago_results.append(PolimagoResult(pos, res.quality, clf_res))

        # 3. show result
        self._model.update(polimago_results)

    def source(self):
        return self._source

    def set_source(self, imageSource):
        self._source = imageSource
        self.notify_source.emit()

    def search_predictor(self):
        return self._search_predictor

    def set_search_predictor(self, searchPredictor):
        self._search_predictor = searchPredictor
        self.notify_search_predictor.emit()

    def classification_predictor(self):
        return self._classification_predictor

    def set_classification_predictor(self, classificationPredictor):
        self._classification_predictor = classificationPredictor
        self.notify_classification_predictor.emit()

    @Signal
    def notify_source(self):
        pass

    @Signal
    def notify_search_predictor(self):
        pass

    @Signal
    def notify_classification_predictor(self):
        pass

    @Slot()
    def source_changed(self):
        # clear the result
        self._model.update([])
        # clear predictors
        self._search = 0
        self._classification = 0
        # close current device
        self._device.close()
        # open new device / image
        path = self._source.toLocalFile()
        if path.endswith(".vin"):
            self._device = cvb.DeviceFactory.open(
                path, cvb.AcquisitionStack.Vin)
            self._stream = self._device.stream()
            self.snap()
        elif path.endswith(".bmp"):
            self._image = cvb.Image(path)
            self._model.refresh(self._image)

    @Slot()
    def search_predictor_changed(self):
        if not self.search_predictor().isEmpty():
            self._search = cvb.polimago.SearchPredictor(self.search_predictor().toLocalFile())

    @Slot()
    def classification_predictor_changed(self):
        if not self.classification_predictor().isEmpty():
            self._classification = cvb.polimago.ClassificationPredictor(self.classification_predictor().toLocalFile())

    imageSource = Property(QUrl, source, set_source)
    searchPredictor = Property(QUrl, search_predictor, set_search_predictor)
    classificationPredictor = Property(QUrl, classification_predictor, set_classification_predictor)
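classify() above is the complete Polimago workflow in one place: grid_search() proposes candidate positions, classify() names each one. The same two calls also work without any Qt around them; the following console sketch reuses the predictor paths and parameters from __init__() and classify() above, with the input image taken from the command line (that argument handling is an assumption, not part of the tutorial).

import os, sys

import cvb
import cvb.polimago

# predictors shipped with the tutorial (same paths as in Classification.__init__)
base = os.path.join(cvb.install_path(), "tutorial", "Polimago", "Images", "Cookies")
search = cvb.polimago.SearchPredictor(os.path.join(base, "Cookies.psc"))
classifier = cvb.polimago.ClassificationPredictor(os.path.join(base, "CookieType.pcc"))

# image to analyse, e.g. a *.bmp of the cookie scene
image = cvb.Image(sys.argv[1])

# 1. find candidate positions over the whole image
results, calls = search.grid_search(image, image.bounds, 0.6, 0.0, 1.0)

# 2. classify every sufficiently good hit and print it
for res in results:
    if res.quality >= 0.3:
        pos = cvb.Point2D(res.x, res.y)
        clf_res, confidences = classifier.classify(image, pos)
        print("{} at ({:.1f}, {:.1f}), search quality {:.2f}, class quality {:.2f}".format(
            clf_res.name, res.x, res.y, res.quality, clf_res.quality))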
result_model.py
import os

import cvb
import cvb.ui

from PySide2.QtCore import QObject, QAbstractListModel, Qt, QModelIndex, Property, Signal, Slot

class PolimagoResult(object):

    def __init__(self, pos, pos_quality, clf_res):
        self._pos = pos
        self._pos_quality = pos_quality
        self._name = clf_res.name
        self._color_quality = clf_res.quality

    def position(self):
        return self._pos

    def name(self):
        return self._name

    def position_quality(self):
        return self._pos_quality

    def color_quality(self):
        return self._color_quality

class ResultModel(QAbstractListModel):

    LineText = Qt.UserRole
    StartPosition = Qt.UserRole + 1
    Quality = Qt.UserRole + 2

    def __init__(self, image_controller, parent=None):
        super(ResultModel, self).__init__(parent)
        self._image_controller = image_controller
        self._results = []

    def update(self, polimago_results):
        self._results = polimago_results
        self.layoutChanged.emit()

    def roleNames(self):
        roles = dict()
        roles[ResultModel.LineText] = b"lineText"
        roles[ResultModel.StartPosition] = b"startPosition"
        roles[ResultModel.Quality] = b"quality"
        return roles

    def rowCount(self, parent=QModelIndex()):
        return len(self._results)

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None

        result = self._results[index.row()]

        if role == ResultModel.LineText:
            return result.name()
        elif role == ResultModel.StartPosition:
            return cvb.ui.cvb_to_qt_point(result.position())
        elif role == ResultModel.Quality:
            return result.position_quality()
        else:
            return None

    @Slot()
    def refresh(self, image):
        self._image_controller.refresh(image)
main.qml
import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick
import QtQuick.Controls 1.3
import QtQuick.Layouts 1.2
import QtQuick.Dialogs 1.2

ApplicationWindow
{
    id: rootWin
    visible: true
    property int margin: 11
    width: 1080
    height: 720

    ColumnLayout
    {
        id: mainLayout
        anchors.fill: parent
        anchors.margins: margin

        CvbQuick.ImageView
        {
            id: view
            image: mainImage
            Layout.fillWidth: true
            Layout.fillHeight: true

            Repeater
            {
                model: resultModel
                delegate: TextLabel
                {
                    imageView: view
                    imageX: startPosition.x
                    imageY: startPosition.y
                    posQuality: quality
                    text: lineText
                }
            }
        }

        RowLayout
        {
            Button
            {
                text: "Load Image"
                onClicked: openImageDialog.open()
            }

            Button
            {
                text: "Load Search Predictor"
                onClicked: loadSearchPredictorDialog.open()
            }

            Button
            {
                text: "Load Classification Predictor"
                onClicked: loadClassificationPredictorDialog.open()
            }

            Button
            {
                id: btnSnap
                text: "Snap"
                onClicked: classification.snap()
            }

            Button
            {
                text: "Classify"
                onClicked: classification.classify()
            }
        }

        FileDialog
        {
            id: openImageDialog
            title: "Open Image"
            selectExisting: true
            nameFilters: [ "Image source files (*.bmp)", "Common Vision Blox driver files (*.vin)" ]
            onAccepted:
            {
                classification.imageSource = openImageDialog.fileUrl;
                var path = classification.imageSource.toString();
                if (path.endsWith(".bmp"))
                    btnSnap.enabled = false
                else
                    btnSnap.enabled = true
            }
        }

        FileDialog
        {
            id: loadSearchPredictorDialog
            title: "Load Search Predictor"
            selectExisting: true
            nameFilters: [ "Search classifier (*.psc)" ]
            onAccepted: classification.searchPredictor = loadSearchPredictorDialog.fileUrl;
        }

        FileDialog
        {
            id: loadClassificationPredictorDialog
            title: "Load Classification Predictor"
            selectExisting: true
            nameFilters: [ "Classification predictor (*.pcc)" ]
            onAccepted: classification.classificationPredictor = loadClassificationPredictorDialog.fileUrl;
        }
    }
}
TextLabel.qml

import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick

// Output result as overlay
CvbQuick.ImageLabel
{
    id: label
    property var text: ""
    property var posQuality: 0.0

    Text
    {
        text: label.text + " (Q=" + String(label.posQuality.toFixed(2)) + ")"
        font.pointSize: 10
        font.bold: true
        color: "white"
        style: Text.Outline
        styleColor: "black"
    }
}