CVBpy 14.1
minos/QmlMinos
import os, sys

import cvb
import cvb.ui
from minos_result_model import MinosResultModel
from minos_search import MinosSearch

if sys.version_info >= (3, 11):
    from PySide6.QtCore import QObject, QUrl, QAbstractListModel, Qt, QModelIndex
    from PySide6.QtQml import QQmlApplicationEngine, qmlRegisterType
    from PySide6.QtGui import QGuiApplication
    from PySide6.QtGui import QIcon
else:
    from PySide2.QtCore import QObject, QUrl, QAbstractListModel, Qt, QModelIndex
    from PySide2.QtQml import QQmlApplicationEngine, qmlRegisterType
    from PySide2.QtGui import QGuiApplication
    from PySide2.QtGui import QIcon

if __name__ == "__main__":

    app = QGuiApplication([])
    app.setOrganizationName('STEMMER IMAGING')
    app.setOrganizationDomain('https://www.stemmer-imaging.com/')
    app.setApplicationName('Minos Python tutorial')

    # tell Windows the correct AppUserModelID for this process (shows icon in the taskbar)
    if sys.platform == 'win32':
        import ctypes
        myappid = u'stemmerimaging.commonvisionblox.pyminos.0'
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

    app.setWindowIcon(QIcon('Tutorial-Python_32x32.png'))
    # open device
    with cvb.DeviceFactory.open(
            os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
            cvb.AcquisitionStack.Vin) as device:

        # setup QML interface objects
        image_controller = cvb.ui.ImageController()
        minos_result_model = MinosResultModel(image_controller)

        # main search object
        minos_search = MinosSearch(device.stream(), minos_result_model)

        # register QML components for an image display
        cvb.ui.ImageViewItem.register()
        cvb.ui.ImageLabelItem.register()

        engine = QQmlApplicationEngine()
        context = engine.rootContext()

        # create a controller object to communicate with QML
        context.setContextProperty("mainImage", image_controller)
        # create a Minos result model to communicate with QML for overlays
        context.setContextProperty("minosResultModel", minos_result_model)
        # create a Minos search object to communicate with QML (grab, snap)
        context.setContextProperty("minosSearch", minos_search)

        # load main QML file
        engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))

        # do a first snap at startup
        minos_search.snap()

        app.exec_()
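The two register() calls above are convenience wrappers from cvb.ui; according to their documentation they essentially forward to Qt's qmlRegisterType(). As a rough sketch (the exact wrapper internals are an assumption), the equivalent direct registration of the CvbQuick 1.0 types used by main.qml and TextLabel.qml would be:

# roughly what cvb.ui.ImageViewItem.register() / cvb.ui.ImageLabelItem.register() do:
qmlRegisterType(cvb.ui.ImageViewItem, "CvbQuick", 1, 0, "ImageView")
qmlRegisterType(cvb.ui.ImageLabelItem, "CvbQuick", 1, 0, "ImageLabel")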
minos_search.py:

import os, sys

import cvb
import cvb.minos

if sys.version_info >= (3, 11):
    from PySide6 import QtCore
    from PySide6.QtCore import QObject, Qt, Slot
else:
    from PySide2 import QtCore
    from PySide2.QtCore import QObject, Qt, Slot

from minos_result_model import MinosResultModel, MinosResultData

# Global params
FIRST_AREA_RADIUS = 8.0
LINE_RADIUS = 4
WORD_HEIGHT = 4
WORD_WIDTH = 4
LINE_STEP_RADIUS = 8

LINE_STEP = cvb.Point2D(0.0, 16.0)
OCR_AOI = cvb.Area2D(cvb.Point2D(-3.0, -3.0), cvb.Point2D(3.0, -3.0), cvb.Point2D(-3.0, 3.0))
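# (All sizes are in pixels: FIRST_AREA_RADIUS is the half-width of the vertical strip used
#  to locate the topmost text line, LINE_STEP is the expected offset from one text line to
#  the next, and OCR_AOI is the read area handed to Classifier.read() for every line.)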


class MinosSearch(QtCore.QObject):

    def __init__(self, stream, minos_result_model):
        super().__init__()
        self._stream = stream
        self._model = minos_result_model
        self._classifier = cvb.minos.Classifier(os.path.join(cvb.install_path(), "tutorial", "Minos", "Images", "OCR", "Training Set", "Numbers.clf"))

    @Slot()
    def snap(self):
        image, wait_status = self._stream.get_timed_snapshot(1000)
        if wait_status != cvb.WaitStatus.Ok:
            return None

        # Minos search in snap
        result = MinosResultData()
        result.image = image

        stop_watch = cvb.StopWatch()
        stop_watch.start()
        result.list = self.search(image.planes[0])
        self._model.set_processing_time(stop_watch.time_span)

        self._model.push_data(result)

    # The Minos search
    def search(self, plane):
        width = plane.map().width
        height = plane.map().height

        # STEP 1: find the topmost text line
        # create an area centered horizontally in the image,
        # 2 * FIRST_AREA_RADIUS wide and spanning the full image height,
        # with its direction pointing towards the bottom of the image
        search_aoi = cvb.Area2D(cvb.Point2D(width / 2 - FIRST_AREA_RADIUS, 0.0),
                                cvb.Point2D(width / 2 + FIRST_AREA_RADIUS, 0.0),
                                cvb.Point2D(width / 2 - FIRST_AREA_RADIUS, height - 1.0))

        # search for the first object
        search_result = self._classifier.search(plane, cvb.minos.SearchMode.FindFirst, search_aoi)

        if search_result.quality < 0.1:
            return []

        # now we found the first line of the text

        # STEP 2: find the beginning of the line
        # try to find the first char in the line:
        # create an area centered around the found line,
        # starting at the left side of the image,
        # 2 * LINE_RADIUS high and reaching to the found x position,
        # with its direction pointing to the right
        position = search_result.position

        line_aoi = cvb.Area2D(cvb.Point2D(0.0, position.y - LINE_RADIUS),
                              cvb.Point2D(0.0, position.y + LINE_RADIUS),
                              cvb.Point2D(position.x, position.y - LINE_RADIUS))

        # do a line by line search
        result_list = []
        self.search_line(plane, line_aoi, result_list)
        return result_list

    # Minos search per line
    def search_line(self, plane, line_aoi, result_list):

        # search for the first object
        search_result = self._classifier.search(plane, cvb.minos.SearchMode.FindFirst, line_aoi)

        if search_result.quality == 0.0:
            return None

        # now we found the first char of this line

        # STEP 3: read the string
        # save the x and y position of the line start
        # and try to read the first word:
        # create an area left-aligned with the first char,
        # sized WORD_WIDTH * WORD_HEIGHT
        line_start = search_result.position

        read_aoi = cvb.Area2D(cvb.Point2D(line_start.x, line_start.y - WORD_HEIGHT / 2),
                              cvb.Point2D(line_start.x + WORD_WIDTH / 2, line_start.y - WORD_HEIGHT / 2),
                              cvb.Point2D(line_start.x, line_start.y + WORD_HEIGHT / 2))

        # read the word
        search_result = self._classifier.read(plane, read_aoi, OCR_AOI)

        # save the result
        stream = ""
        for i in range(len(search_result)):
            stream += search_result[i].name

        result = (line_start, stream)
        result_list.append(result)

        line_start += LINE_STEP

        # STEP 4: jump to the next line
        # create an area centered around the expected start of the
        # next line, 2 * LINE_STEP_RADIUS wide and high
        next_line_aoi = cvb.Area2D(cvb.Point2D(line_start.x - LINE_STEP_RADIUS, line_start.y - LINE_STEP_RADIUS),
                                   cvb.Point2D(line_start.x + LINE_STEP_RADIUS, line_start.y - LINE_STEP_RADIUS),
                                   cvb.Point2D(line_start.x - LINE_STEP_RADIUS, line_start.y + LINE_STEP_RADIUS))

        # search the next line (the recursion should not get too deep)
        self.search_line(plane, next_line_aoi, result_list)
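search_line() calls itself once for every text line it finds. For images with many lines the same walk can also be written as a loop; the following sketch (not part of the original tutorial, built only from the calls shown above) is a drop-in alternative method for MinosSearch:

    # Iterative variant of search_line(): walks from line to line in a loop
    # instead of recursing once per line.
    def search_lines(self, plane, line_aoi, result_list):
        aoi = line_aoi
        while True:
            # find the first character of the current line
            search_result = self._classifier.search(plane, cvb.minos.SearchMode.FindFirst, aoi)
            if search_result.quality == 0.0:
                return

            # read the characters of this line
            line_start = search_result.position
            read_aoi = cvb.Area2D(cvb.Point2D(line_start.x, line_start.y - WORD_HEIGHT / 2),
                                  cvb.Point2D(line_start.x + WORD_WIDTH / 2, line_start.y - WORD_HEIGHT / 2),
                                  cvb.Point2D(line_start.x, line_start.y + WORD_HEIGHT / 2))
            chars = self._classifier.read(plane, read_aoi, OCR_AOI)
            result_list.append((line_start, "".join(char.name for char in chars)))

            # move the search area to the expected start of the next line
            line_start += LINE_STEP
            aoi = cvb.Area2D(cvb.Point2D(line_start.x - LINE_STEP_RADIUS, line_start.y - LINE_STEP_RADIUS),
                             cvb.Point2D(line_start.x + LINE_STEP_RADIUS, line_start.y - LINE_STEP_RADIUS),
                             cvb.Point2D(line_start.x - LINE_STEP_RADIUS, line_start.y + LINE_STEP_RADIUS))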
minos_result_model.py:

import os, sys

import cvb
import cvb.ui

if sys.version_info >= (3, 11):
    from PySide6.QtCore import QObject, QAbstractListModel, Qt, QModelIndex, Property, Signal, Slot
else:
    from PySide2.QtCore import QObject, QAbstractListModel, Qt, QModelIndex, Property, Signal, Slot

class MinosResultData(object):

    def __init__(self):
        self.image = None
        self.list = []

class MinosResultModel(QAbstractListModel):

    LineText = Qt.UserRole
    StartPosition = Qt.UserRole + 1

    def __init__(self, image_controller, parent=None):
        super(MinosResultModel, self).__init__(parent)
        self._image_controller = image_controller
        self._result_queue = []
        self._current_result = MinosResultData()
        self._text = ""
        self._processing_time = 0.0

        self.notify_refresh.connect(self.refresh)

    def roleNames(self):
        roles = dict()
        roles[MinosResultModel.LineText] = b"lineText"
        roles[MinosResultModel.StartPosition] = b"startPosition"
        return roles

    def rowCount(self, parent=QModelIndex()):
        return len(self._current_result.list)

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None
        position, text = self._current_result.list[index.row()]
        if role == MinosResultModel.LineText:
            return text
        elif role == MinosResultModel.StartPosition:
            return cvb.ui.cvb_to_qt_point(position)
        else:
            return None

    def push_data(self, data):
        self._result_queue.append(data)
        self.notify_refresh.emit()

    @Slot()
    def refresh(self):
        if len(self._result_queue) == 0:
            return None

        self._current_result = self._result_queue.pop()
        self._result_queue.clear()

        # refresh the model
        self.layoutChanged.emit()
        # refresh the text area
        stream = ""
        for i in range(len(self._current_result.list)):
            stream += (self._current_result.list[i][1] + '\n')
        self.set_search_result_text(stream)
        # refresh the image
        self._image_controller.refresh(self._current_result.image)

    def set_search_result_text(self, text):
        if self._text != text:
            self._text = text
            self.notify_search_result_text.emit()

    def set_processing_time(self, processing_time):
        if self._processing_time != processing_time:
            self._processing_time = processing_time
            self.notify_processing_time.emit()

    def search_result_text(self):
        return self._text

    def processing_time(self):
        return self._processing_time

    notify_search_result_text = Signal()
    notify_processing_time = Signal()
    notify_refresh = Signal()

    searchResultText = Property(str, search_result_text, notify=notify_search_result_text)
    processingTime = Property(float, processing_time, notify=notify_processing_time)

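For reference, the model can also be exercised without the QML front end. The sketch below (illustrative only; the stand-in controller and the sample values are not part of the tutorial) pushes one result through push_data() and reads it back through the Qt model interface:

import cvb
from minos_result_model import MinosResultModel, MinosResultData

class DummyController(object):
    # stand-in for cvb.ui.ImageController: only provides the refresh() hook
    def refresh(self, image):
        pass

model = MinosResultModel(DummyController())

data = MinosResultData()
data.list = [(cvb.Point2D(10.0, 20.0), "0123")]
model.push_data(data)  # emits notify_refresh, which runs refresh() and updates the model

print(model.rowCount())                                          # -> 1
print(model.data(model.index(0, 0), MinosResultModel.LineText))  # -> "0123"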
main.qml:

import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick
import QtQuick.Controls 1.2
import QtQuick.Layouts 1.3

ApplicationWindow
{
    id: rootWin
    visible: true
    property int margin: 11
    width: 1080
    height: 720

    ColumnLayout
    {
        id: mainLayout
        anchors.fill: parent
        anchors.margins: margin

        CvbQuick.ImageView
        {
            id: view
            image: mainImage
            Layout.fillWidth: true
            Layout.fillHeight: true

            // Text result per line
            Repeater
            {
                model: minosResultModel
                delegate: TextLabel
                {
                    imageView: view
                    imageX: startPosition.x
                    imageY: startPosition.y
                    text: lineText
                }
            }
        }

        RowLayout
        {

            TextArea
            {
                id: txtArea
                text: minosResultModel.searchResultText
                readOnly: true
                wrapMode: TextEdit.WrapAnywhere
                Layout.fillWidth: true
            }

            ColumnLayout
            {
                Layout.preferredWidth: 200

                RowLayout
                {

                    Layout.alignment: Qt.AlignCenter

                    Button
                    {
                        text: "Snap"
                        onClicked: minosSearch.snap()
                    }
                }

                Label
                {
                    id: txtProcessingTime
                    text: "Processing time: " + String(minosResultModel.processingTime) + " ms"
                    font.bold: true
                    Layout.alignment: Qt.AlignCenter
                }

            }

        }
    }
}
TextLabel.qml:

import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick

// Output result as overlay
CvbQuick.ImageLabel
{
    id: label
    property var text: ""
    labelScale: 1

    Text
    {
        text: label.text
        color: "red"
        font.pointSize: 4
    }

}