CVBpy 14.0
minos/QmlMinos
import os, sys

import cvb
import cvb.ui
from minos_result_model import MinosResultModel
from minos_search import MinosSearch

from PySide2.QtCore import QObject, QUrl, QAbstractListModel, Qt, QModelIndex
from PySide2.QtQml import QQmlApplicationEngine, qmlRegisterType
from PySide2.QtGui import QGuiApplication
from PySide2.QtGui import QIcon


if __name__ == "__main__":

    app = QGuiApplication([])
    app.setOrganizationName('STEMMER IMAGING')
    app.setOrganizationDomain('https://www.stemmer-imaging.com/')
    app.setApplicationName('Minos Python tutorial')

    # tell Windows the correct AppUserModelID for this process (shows the icon in the taskbar)
    if sys.platform == 'win32':
        import ctypes
        myappid = u'stemmerimaging.commonvisionblox.pyminos.0'
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

    app.setWindowIcon(QIcon('Tutorial-Python_32x32.png'))

    # open device
    with cvb.DeviceFactory.open(
            os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
            cvb.AcquisitionStack.Vin) as device:

        # setup QML interface objects
        image_controller = cvb.ui.ImageController()
        minos_result_model = MinosResultModel(image_controller)

        # main search object
        minos_search = MinosSearch(device.stream(), minos_result_model)

        # register QML components for the image display and the overlay labels
        cvb.ui.ImageViewItem.register()
        cvb.ui.ImageLabelItem.register()

        engine = QQmlApplicationEngine()
        context = engine.rootContext()

        # controller object to communicate with QML
        context.setContextProperty("mainImage", image_controller)
        # Minos result model to communicate with QML for overlays
        context.setContextProperty("minosResultModel", minos_result_model)
        # Minos search object to communicate with QML (grab, snap)
        context.setContextProperty("minosSearch", minos_search)

        # load main QML file
        engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))

        # do a first snap at startup
        minos_search.snap()

        app.exec_()
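The QML layer above is only needed for display; the acquisition part can be checked on its own. Below is a minimal sketch (not part of the tutorial files), assuming the same CVMock.vin mock driver and a default CVB installation, that opens the device and verifies a single snapshot from the console:

import os

import cvb

# open the mock driver and grab one frame, without any UI
with cvb.DeviceFactory.open(
        os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
        cvb.AcquisitionStack.Vin) as device:
    image, wait_status = device.stream().get_timed_snapshot(1000)
    if wait_status == cvb.WaitStatus.Ok:
        plane_map = image.planes[0].map()
        print("snapshot taken:", plane_map.width, "x", plane_map.height, "pixels")
    else:
        print("acquisition failed:", wait_status)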
minos_search.py
import os

import cvb
import cvb.minos

from PySide2 import QtCore
from PySide2.QtCore import QObject, Qt, Slot

from minos_result_model import MinosResultModel, MinosResultData

# Global params
FIRST_AREA_RADIUS = 8.0
LINE_RADIUS = 4
WORD_HEIGHT = 4
WORD_WIDTH = 4
LINE_STEP_RADIUS = 8

LINE_STEP = cvb.Point2D(0.0, 16.0)
OCR_AOI = cvb.Area2D(cvb.Point2D(-3.0, -3.0), cvb.Point2D(3.0, -3.0), cvb.Point2D(-3.0, 3.0))


class MinosSearch(QtCore.QObject):

    def __init__(self, stream, minos_result_model):
        super().__init__()
        self._stream = stream
        self._model = minos_result_model
        self._classifier = cvb.minos.Classifier(os.path.join(
            cvb.install_path(), "tutorial", "Minos", "Images", "OCR", "Training Set", "Numbers.clf"))

    @Slot()
    def snap(self):
        image, wait_status = self._stream.get_timed_snapshot(1000)
        if wait_status != cvb.WaitStatus.Ok:
            return None

        # Minos search on the snapped image
        result = MinosResultData()
        result.image = image

        stop_watch = cvb.StopWatch()
        stop_watch.start()
        result.list = self.search(image.planes[0])
        self._model.set_processing_time(stop_watch.time_span)

        self._model.push_data(result)

    # The Minos search
    def search(self, plane):
        width = plane.map().width
        height = plane.map().height

        # STEP 1: find the top-most line of text.
        # Create an area centered horizontally, 2 * FIRST_AREA_RADIUS wide
        # and as tall as the image, with the search direction pointing down.
        search_aoi = cvb.Area2D(
            cvb.Point2D(width / 2 - FIRST_AREA_RADIUS, 0.0),
            cvb.Point2D(width / 2 + FIRST_AREA_RADIUS, 0.0),
            cvb.Point2D(width / 2 - FIRST_AREA_RADIUS, height - 1.0))

        # search for the first object
        search_result = self._classifier.search(plane, cvb.minos.SearchMode.FindFirst, search_aoi)

        if search_result.quality < 0.1:
            return []

        # we found the first line of the text

        # STEP 2: find the beginning of that line.
        # Create an area centered vertically around the first hit,
        # 2 * LINE_RADIUS high, reaching from the left image border to the
        # hit position, with the search direction pointing right.
        position = search_result.position

        line_aoi = cvb.Area2D(
            cvb.Point2D(0.0, position.y - LINE_RADIUS),
            cvb.Point2D(0.0, position.y + LINE_RADIUS),
            cvb.Point2D(position.x, position.y - LINE_RADIUS))

        # do a line-by-line search
        result_list = []
        self.search_line(plane, line_aoi, result_list)
        return result_list

    # Minos search per line
    def search_line(self, plane, line_aoi, result_list):

        # search for the first object
        search_result = self._classifier.search(plane, cvb.minos.SearchMode.FindFirst, line_aoi)

        if search_result.quality == 0.0:
            return None

        # we found the first char of the line

        # STEP 3: read the string.
        # Save the start position of the line, then read the word from an
        # area left-aligned with the first char, sized from WORD_WIDTH and WORD_HEIGHT.
        line_start = search_result.position

        read_aoi = cvb.Area2D(
            cvb.Point2D(line_start.x, line_start.y - WORD_HEIGHT / 2),
            cvb.Point2D(line_start.x + WORD_WIDTH / 2, line_start.y - WORD_HEIGHT / 2),
            cvb.Point2D(line_start.x, line_start.y + WORD_HEIGHT / 2))

        # read the word
        search_result = self._classifier.read(plane, read_aoi, OCR_AOI)

        # save the result
        stream = ""
        for i in range(len(search_result)):
            stream += search_result[i].name

        result = (line_start, stream)
        result_list.append(result)

        line_start += LINE_STEP

        # STEP 4: jump to the next line.
        # Create an area of 2 * LINE_STEP_RADIUS around the expected start
        # of the next line and repeat the search there.
        next_line_aoi = cvb.Area2D(
            cvb.Point2D(line_start.x - LINE_STEP_RADIUS, line_start.y - LINE_STEP_RADIUS),
            cvb.Point2D(line_start.x + LINE_STEP_RADIUS, line_start.y - LINE_STEP_RADIUS),
            cvb.Point2D(line_start.x - LINE_STEP_RADIUS, line_start.y + LINE_STEP_RADIUS))

        # next line (the recursion stays shallow: one level per text line)
        self.search_line(plane, next_line_aoi, result_list)
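MinosSearch only talks to its result model through set_processing_time() and push_data(), so the OCR search above can also be driven without the QML front end. The following is a minimal sketch under that assumption; ConsoleResultModel is a hypothetical stand-in for MinosResultModel (not part of the tutorial), the driver and classifier paths are the ones used above, and no QGuiApplication is created since a plain QObject does not require one:

import os

import cvb

from minos_search import MinosSearch


class ConsoleResultModel(object):
    # hypothetical stand-in: prints the results instead of feeding a QML model

    def set_processing_time(self, processing_time):
        print("processing time: {:.2f} ms".format(processing_time))

    def push_data(self, result):
        for position, text in result.list:
            print("line at ({:.1f}, {:.1f}): {}".format(position.x, position.y, text))


if __name__ == "__main__":
    with cvb.DeviceFactory.open(
            os.path.join(cvb.install_path(), "drivers", "CVMock.vin"),
            cvb.AcquisitionStack.Vin) as device:
        search = MinosSearch(device.stream(), ConsoleResultModel())
        search.snap()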
minos_result_model.py
import os

import cvb
import cvb.ui

from PySide2.QtCore import QObject, QAbstractListModel, Qt, QModelIndex, Property, Signal, Slot


class MinosResultData(object):

    def __init__(self):
        self.image = None
        self.list = []


class MinosResultModel(QAbstractListModel):

    LineText = Qt.UserRole
    StartPosition = Qt.UserRole + 1

    def __init__(self, image_controller, parent=None):
        super(MinosResultModel, self).__init__(parent)
        self._image_controller = image_controller
        self._result_queue = []
        self._current_result = MinosResultData()
        self._text = ""
        self._processing_time = 0.0

        self.notify_refresh.connect(self.refresh)

    def roleNames(self):
        roles = dict()
        roles[MinosResultModel.LineText] = b"lineText"
        roles[MinosResultModel.StartPosition] = b"startPosition"
        return roles

    def rowCount(self, parent=QModelIndex()):
        return len(self._current_result.list)

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None
        position, text = self._current_result.list[index.row()]
        if role == MinosResultModel.LineText:
            return text
        elif role == MinosResultModel.StartPosition:
            return cvb.ui.cvb_to_qt_point(position)
        else:
            return None

    def push_data(self, data):
        self._result_queue.append(data)
        self.notify_refresh.emit()

    @Slot()
    def refresh(self):
        if len(self._result_queue) == 0:
            return None

        self._current_result = self._result_queue.pop()
        self._result_queue.clear()

        # refresh the model
        self.layoutChanged.emit()

        # refresh the text area
        stream = ""
        for i in range(len(self._current_result.list)):
            stream += (self._current_result.list[i][1] + '\n')
        self.set_search_result_text(stream)

        # refresh the image
        self._image_controller.refresh(self._current_result.image)

    def set_search_result_text(self, text):
        if self._text != text:
            self._text = text
            self.notify_search_result_text.emit()

    def set_processing_time(self, processing_time):
        if self._processing_time != processing_time:
            self._processing_time = processing_time
            self.notify_processing_time.emit()

    def search_result_text(self):
        return self._text

    def processing_time(self):
        return self._processing_time

    notify_search_result_text = Signal()
    notify_processing_time = Signal()
    notify_refresh = Signal()

    searchResultText = Property(str, search_result_text, notify=notify_search_result_text)
    processingTime = Property(float, processing_time, notify=notify_processing_time)
main.qml
import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick
import QtQuick.Controls 1.2
import QtQuick.Layouts 1.3

ApplicationWindow
{
    id: rootWin
    visible: true
    property int margin: 11
    width: 1080
    height: 720

    ColumnLayout
    {
        id: mainLayout
        anchors.fill: parent
        anchors.margins: margin

        CvbQuick.ImageView
        {
            id: view
            image: mainImage
            Layout.fillWidth: true
            Layout.fillHeight: true

            // Text result per line
            Repeater
            {
                model: minosResultModel
                delegate: TextLabel
                {
                    imageView: view
                    imageX: startPosition.x
                    imageY: startPosition.y
                    text: lineText
                }
            }
        }

        RowLayout
        {
            TextArea
            {
                id: txtArea
                text: minosResultModel.searchResultText
                readOnly: true
                wrapMode: TextEdit.WrapAnywhere
                Layout.fillWidth: true
            }

            ColumnLayout
            {
                Layout.preferredWidth: 200

                RowLayout
                {
                    Layout.alignment: Qt.AlignCenter

                    Button
                    {
                        text: "Snap"
                        onClicked: minosSearch.snap()
                    }
                }

                Label
                {
                    id: txtProcessingTime
                    text: "Processing time: " + String(minosResultModel.processingTime) + " ms"
                    font.bold: true
                    Layout.alignment: Qt.AlignCenter
                }
            }
        }
    }
}
TextLabel.qml
import QtQuick 2.3
import CvbQuick 1.0 as CvbQuick

// Output a result line as an overlay on the image
CvbQuick.ImageLabel
{
    id: label
    property var text: ""
    labelScale: 1

    Text
    {
        text: label.text
        color: "red"
        font.pointSize: 4
    }
}