# test_camera.py
#
# Open an RTSP stream and feed image frames to 'openalpr'
# for real-time license plate recognition.

import sys

import numpy as np
import cv2
from openalpr import Alpr


RTSP_SOURCE = 'rtsp://face:[email protected]:554/live.sdp'
WINDOW_NAME = 'openalpr'
FRAME_SKIP = 15


def open_cam_rtsp(uri, width=1280, height=720, latency=2000):
    # GStreamer pipeline using the Jetson TX2 hardware H.264 decoder (omxh264dec)
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! '
               'videoconvert ! appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def main():
    alpr = Alpr('tw', 'tx2.conf', '/usr/local/share/openalpr/runtime_data')
    if not alpr.is_loaded():
        print('Error loading OpenALPR')
        sys.exit(1)
    alpr.set_top_n(3)
    #alpr.set_default_region('new')

    cap = open_cam_rtsp(RTSP_SOURCE)
    if not cap.isOpened():
        alpr.unload()
        sys.exit('Failed to open video file!')
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.setWindowTitle(WINDOW_NAME, 'OpenALPR video test')

    _frame_number = 0
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            print('VideoCapture.read() failed. Exiting...')
            break

        _frame_number += 1
        if _frame_number % FRAME_SKIP != 0:
            continue
        cv2.imshow(WINDOW_NAME, frame)

        results = alpr.recognize_ndarray(frame)
        for i, plate in enumerate(results['results']):
            best_candidate = plate['candidates'][0]
            print('Plate #{}: {:7s} ({:.2f}%)'.format(
                i, best_candidate['plate'].upper(), best_candidate['confidence']))

        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
    cap.release()
    alpr.unload()


if __name__ == "__main__":
    main()
# test_video.py
#
# Open a video input file and feed each image frame to 'openalpr'
# for license plate recognition.

import sys

import numpy as np
import cv2
from openalpr import Alpr


VIDEO_SOURCE = '/home/nvidia/Videos/alpr/2018-03-09-0850.mp4'
WINDOW_NAME = 'openalpr'
FRAME_SKIP = 15


def main():
    alpr = Alpr('tw', 'tx2.conf', '/usr/local/share/openalpr/runtime_data')
    if not alpr.is_loaded():
        print('Error loading OpenALPR')
        sys.exit(1)
    alpr.set_top_n(3)
    #alpr.set_default_region('new')

    cap = cv2.VideoCapture(VIDEO_SOURCE)
    if not cap.isOpened():
        alpr.unload()
        sys.exit('Failed to open video file!')
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.setWindowTitle(WINDOW_NAME, 'OpenALPR video test')

    _frame_number = 0
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            print('VideoCapture.read() failed. Exiting...')
            break

        _frame_number += 1
        if _frame_number % FRAME_SKIP != 0:
            continue
        cv2.imshow(WINDOW_NAME, frame)

        results = alpr.recognize_ndarray(frame)
        for i, plate in enumerate(results['results']):
            best_candidate = plate['candidates'][0]
            print('Plate #{}: {:7s} ({:.2f}%)'.format(
                i, best_candidate['plate'].upper(), best_candidate['confidence']))

        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
    cap.release()
    alpr.unload()


if __name__ == "__main__":
    main()
; 25-45, 35-55, 45-65, 55-75, 65-85
char_analysis_min_pct = 0.25
char_analysis_height_range = 0.20
char_analysis_height_step_size = 0.10
char_analysis_height_num_steps = 5
segmentation_min_speckle_height_percent = 0.3
segmentation_min_box_width_px = 4
segmentation_min_charheight_percent = 0.5;
segmentation_max_segment_width_percent_vs_average = 1.35;
plate_width_mm = 380.0
plate_height_mm = 160.0
multiline = 0
char_height_mm = 94
char_width_mm = 47
char_whitespace_top_mm = 36
char_whitespace_bot_mm = 26
template_max_width_px = 152
template_max_height_px = 64
; Higher sensitivity means less lines
plateline_sensitivity_vertical = 25
plateline_sensitivity_horizontal = 45
; Regions smaller than this will be disqualified
min_plate_size_width_px = 80
min_plate_size_height_px = 35
; Results with fewer or more characters will be discarded
postprocess_min_characters = 4
postprocess_max_characters = 7
ocr_language = ltw
; Override for postprocess letters/numbers regex.
postprocess_regex_letters = [A-Z]
postprocess_regex_numbers = [0-9]
; Whether the plate is always dark letters on light background, light letters on dark background, or both
; value can be either always, never, or auto
invert = auto
; Specify the path to the runtime data directory
runtime_dir = /usr/local/share/openalpr/runtime_data
ocr_img_size_percent = 1.33333333
state_id_img_size_percent = 2.0
; Calibrating your camera improves detection accuracy in cases where vehicle plates are captured at a steep angle
; Use the openalpr-utils-calibrate utility to calibrate your fixed camera to adjust for an angle
; Once done, update the prewarp config with the values obtained from the tool
;prewarp =
;prewarp = planar,1280.000000,720.000000,0.000850,0.000750,0.080000,0.975000,0.815000,0.000000,0.000000
; This is for 0309 photos
; prewarp = planar,1280.000000,720.000000,0.000600,0.000900,0.060000,1.000000,1.000000,0.000000,0.000000
; This is for 0313 photos
prewarp = planar,1280.000000,720.000000,0.000550,0.000750,0.130000,1.000000,1.000000,0.000000,0.000000
; detection will ignore plates that are too large. This is a good efficiency technique to use if the
; plates are going to be a fixed distance away from the camera (e.g., you will never see plates that fill
; up the entire image)
max_plate_width_percent = 15
max_plate_height_percent = 15
; detection_iteration_increase is the percentage that the LBP frame increases each iteration.
; It must be greater than 1.0. A value of 1.01 means increase by 1%, 1.10 increases it by 10% each time.
; So a 1% increase would be ~10x slower than 10% to process, but it has a higher chance of landing
; directly on the plate and getting a strong detection
detection_iteration_increase = 1.05
; The minimum detection strength determines how sure the detection algorithm must be before signaling that
; a plate region exists. Technically this corresponds to LBP nearest neighbors (e.g., how many detections
; are clustered around the same area). For example, 2 = very lenient, 9 = very strict.
detection_strictness = 3
; The detection doesn't necessarily need an extremely high resolution image in order to detect plates
; Using a smaller input image should still find the plates and will do it faster
; Tweaking the max_detection_input values will resize the input image if it is larger than these sizes
; max_detection_input_width/height are specified in pixels
max_detection_input_width = 1920
max_detection_input_height = 1080
; detector is the technique used to find license plate regions in an image. Value can be set to
; lbpcpu - default LBP-based detector uses the system CPU
; lbpgpu - LBP-based detector that uses Nvidia GPU to increase recognition speed.
; lbpopencl - LBP-based detector that uses OpenCL GPU to increase recognition speed. Requires OpenCV 3.0
; morphcpu - Experimental detector that detects white rectangles in an image. Does not require training.
;detector = lbpcpu
detector = lbpgpu
; If set to true, all results must match a postprocess text pattern if a pattern is available.
; If not, the result is disqualified.
must_match_pattern = 1
; Bypasses plate detection. If this is set to 1, the library assumes that each region provided is a likely plate area.
skip_detection = 0
; Specifies the full path to an image file that constrains the detection area. Only the plate regions allowed through the mask
; will be analyzed. The mask image must match the resolution of your image to be analyzed. The mask is black and white.
; Black areas will be ignored, white areas will be searched. An empty value means no mask (scan the entire image)
detection_mask_image =
; OpenALPR can scan the same image multiple times with different randomization. Setting this to a value larger than
; 1 may increase accuracy, but will increase processing time linearly (e.g., analysis_count = 3 is 3x slower)
analysis_count = 1
; OpenALPR detects high-contrast plate crops and uses an alternative edge detection technique. Setting this to 0.0
; would classify ALL images as high-contrast, setting it to 1.0 would classify no images as high-contrast.
contrast_detection_threshold = 0.9
max_plate_angle_degrees = 15
ocr_min_font_point = 6
; Minimum OCR confidence percent to consider.
postprocess_min_confidence = 80
; Any OCR character lower than this will also add an equally likely
; chance that the character is incorrect and will be skipped. Value is a confidence percent
postprocess_confidence_skip_level = 80
debug_general = 0
debug_timing = 0
debug_detector = 0
debug_prewarp = 0
debug_state_id = 0
debug_plate_lines = 0
debug_plate_corners = 0
debug_char_segment = 0
debug_char_analysis = 0
debug_color_filter = 0
debug_ocr = 0
debug_postprocess = 0
debug_show_images = 0
debug_pause_on_frame = 0
Hey, what's up. I just started this in PyCharm and am getting some errors. If you have time, could you take a look? @jkjung-avt
import sys; print('Python %s on %s' % (sys.version, sys.platform))
sys.path.extend(['C:\plaka', 'C:/plaka'])
PyDev console: starting.
Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 17:00:18) [MSC v.1900 64 bit (AMD64)] on win32
runfile('C:/Users/tugra/OneDrive/Masaüstü/790a1410b91c170187f8dbdb8cc698c8-ef010bd7eda0d3a25bd4d6f6abd5503f0752b19b/openalpr_camera.py', wdir='C:/Users/tugra/OneDrive/Masaüstü/790a1410b91c170187f8dbdb8cc698c8-ef010bd7eda0d3a25bd4d6f6abd5503f0752b19b')
Traceback (most recent call last):
File "C:\plaka\venv\lib\site-packages\openalpr\openalpr.py", line 57, in init
self.openalprpy_lib = ctypes.cdll.LoadLibrary("libopenalprpy.dll")
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\ctypes_init.py", line 426, in LoadLibrary
return self.dlltype(name)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\ctypes_init.py", line 348, in init
self._handle = _dlopen(self._name, mode)
OSError: [WinError 126] The specified module could not be found
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "", line 1, in
File "C:\Program Files\JetBrains\PyCharm 2018.2.3\helpers\pydev_pydev_bundle\pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "C:\Program Files\JetBrains\PyCharm 2018.2.3\helpers\pydev_pydev_imps_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:/Users/tugra/OneDrive/Masaüstü/790a1410b91c170187f8dbdb8cc698c8-ef010bd7eda0d3a25bd4d6f6abd5503f0752b19b/openalpr_camera.py", line 63, in
main()
File "C:/Users/tugra/OneDrive/Masaüstü/790a1410b91c170187f8dbdb8cc698c8-ef010bd7eda0d3a25bd4d6f6abd5503f0752b19b/openalpr_camera.py", line 23, in main
alpr = Alpr('tw', 'tx2.conf', '/usr/local/share/openalpr/runtime_data')
File "C:\plaka\venv\lib\site-packages\openalpr\openalpr.py", line 67, in init
raise nex
OSError: Unable to locate the OpenALPR library. Please make sure that OpenALPR is properly installed on your system and that the libraries are in the appropriate paths.
Btw, I added the libs in PyCharm... :/
@ZIKO94ZIKO You could refer to OpenALPR's documentation, "Training OCR", for how to train the OCR model to recognize new characters.
@AmbrosiaOfBia I don't have any experience working with PyCharm on Windows. I think I won't be able to help you solve the problem. Anyway, the error message clearly says the problem is "Unable to locate the OpenALPR library" (most likely this file: libopenalprpy.dll).
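A minimal sketch of one common fix for that on Windows (an assumption, not something verified on this setup): make sure the folder containing libopenalprpy.dll and the other OpenALPR DLLs is on PATH before importing the bindings, since ctypes.cdll.LoadLibrary searches PATH. The install path below is only a placeholder.

# Hypothetical Windows workaround: add the OpenALPR install directory to PATH
# before 'from openalpr import Alpr' so ctypes can find libopenalprpy.dll.
import os

OPENALPR_DIR = r'C:\openalpr_64'  # placeholder; point this at your actual install
os.environ['PATH'] = OPENALPR_DIR + os.pathsep + os.environ.get('PATH', '')

from openalpr import Alpr

alpr = Alpr('us', os.path.join(OPENALPR_DIR, 'openalpr.conf'),
            os.path.join(OPENALPR_DIR, 'runtime_data'))
print('loaded:', alpr.is_loaded())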
Sir, finally it works ^_^ Thank you so much for your help!! Could you please send me your video for testing? VIDEO_SOURCE = '2018-03-09-0850.mp4'
@ZIKO94ZIKO Good to hear your progress. However, I cannot share the requested video file with you. It was a video recording taken from our company's parking lot. Besides, it was for testing Taiwanese vehicle license plates so it probably doesn't help you much.
@jkjung ! I understand your situation, sir ^_^ Thanks!
The script openalpr_video.py works well, but the other script, openalpr_camera.py, gives me this message:
zakaria@ZakariaUbunto:~/Desktop/aee$ python openalpr_camera.py
Error in pixReadMemTiff: function not present
Error in pixReadMem: tiff: no pix returned
Error in pixaGenerateFontFromString: pix not made
Error in bmfCreate: font pixa not made
Failed to open video file!
I added my IP address, user name and password: RTSP_SOURCE = 'rtsp://admin:[email protected]:554/live.sdp'
@ZIKO94ZIKO The 10.15.19.201 RTSP source (IP CAM) is a device internal to our company. You won't be able to access it from outside. To test the 'openalpr_camera.py' script, you'll have to set up your own RTSP source and modify the code accordingly.
@jkjung-avt Yes, the IP of the camera is 192.162.254.4. I pinged it successfully, then set RTSP_SOURCE = 'rtsp://admin:[email protected]:554/live.sdp', but when I run the program I get the same issue:
(base) root@ZakariaUbunto:/home/zakaria/Desktop/aee# python openalpr_camera.py
Error in pixReadMemTiff: function not present
Error in pixReadMem: tiff: no pix returned
Error in pixaGenerateFontFromString: pix not made
Error in bmfCreate: font pixa not made
Failed to open video file!
@ZIKO94ZIKO I guess you are not testing this on a NVIDIA Jetson TX2 platform. In that case, you'll have to modify the gstreamer pipeline string. For example, you could try the following. In case it does not work, you'll then have to research which gstreamer pipeline could work (decode the RTSP stream properly) on your platform.
def open_cam_rtsp(uri, latency=2000):
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! '
               'appsink').format(uri, latency)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
I changed the line cap = open_cam_rtsp(RTSP_SOURCE) to cap = cv2.VideoCapture(RTSP_SOURCE) and it works well. Tell me, Sir, what's the difference between the two functions?
What you did lets OpenCV (VideoCapture) automatically decide how to decode your RTSP stream. The advantage of this approach is that it's very convenient, and you are fortunate that it just works on your platform. On the other hand, the disadvantage is that it might not work at all, or OpenCV might pick a video decoding pipeline which is not the best alternative on your platform.
I specified the exact gstreamer pipeline I wanted to use when initiating cv2.VideoCapture() because I'd like to make sure I used the hardware H.264 decoder ('omxh264dec') on the Jetson TX2 platform.
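A minimal sketch combining the two approaches described above (assuming OpenCV was built with GStreamer support): try an explicit pipeline first, and fall back to letting OpenCV choose its own backend if that fails.

import cv2

def open_rtsp(uri, latency=2000):
    # Explicit software-decode pipeline (avdec_h264); on a Jetson you would
    # use omxh264dec instead, as in the original script.
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! '
               'appsink').format(uri, latency)
    cap = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
    if not cap.isOpened():
        # Fall back: let OpenCV pick a decoding backend (e.g. FFmpeg) automatically.
        cap = cv2.VideoCapture(uri)
    return cap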
Thank you so much for your help and explanation.
I have another issue, Sir: I found some code that detects vehicle speed in real time using a camera. Could you tell me how to integrate your code into that project?
You can find the project here: [https://github.com/pageauc/speed-camera]
@ZIKO94ZIKO Sorry, I don't have time to study the 'speed-camera' code. Please seek others' help on that.
Hey, I'm getting the same errors as most other users. Whenever I try running either file, it exits with sys.exit('Failed to open video file!').
I've tried using both functions, cap = open_cam_rtsp(RTSP_SOURCE) and cap = cv2.VideoCapture(RTSP_SOURCE), but unfortunately neither works.
The link below is the RTSP_SOURCE: rtmp://192.168.0.123/live/**secrect key**
I tested the stream itself by installing OBS Studio and streaming it via VLC media player, and everything worked fine.
Has anyone managed to solve the issue at hand? If so, any help is much appreciated, thanks!
Hey all, I have a few questions regarding standard practices with license plate recognition.
When running openalpr_video.py, it scans the entire frame on a frame-by-frame basis and outputs the values. I wrote some code to do it every 5 frames instead, but that does not really fix the problem. Considering I'd like to store the results in a CSV file, how would you go about capturing a single license plate and distinguishing one license plate from another?
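One possible sketch of such de-duplication (an illustration only, not from the thread; the plate fields follow the scripts above, while the CSV file name and the 10-second window are arbitrary assumptions): write a plate string to the CSV only if it has not been logged recently.

import csv
import time

SEEN_TTL_SEC = 10.0   # assume a plate seen again within 10 s is the same vehicle
_last_seen = {}       # plate string -> time it was last written

def log_plate(csv_path, plate, confidence):
    """Append a plate to the CSV only if it hasn't been logged recently."""
    now = time.time()
    if now - _last_seen.get(plate, 0.0) < SEEN_TTL_SEC:
        return False
    _last_seen[plate] = now
    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow(
            [time.strftime('%Y-%m-%d %H:%M:%S'), plate, confidence])
    return True

# Inside the main loop, after alpr.recognize_ndarray(frame):
# for plate in results['results']:
#     best = plate['candidates'][0]
#     log_plate('plates.csv', best['plate'].upper(), best['confidence'])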
Hello,
In the above programs, how can we specify the path of runtime_data? What does it mean? Can anyone help me out?
You're searching for the path, right? Type this command: find / -path *runtime_data* and copy the result into line 16 of the file openalpr_video.py.
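If that find command is not available (for example on Windows, where find is a different tool), a rough Python equivalent is to walk the filesystem and look for a directory named runtime_data; the starting directory below is just a guess.

import os

START_DIR = '/usr/local/share'   # adjust as needed, e.g. '/' or r'C:\'
for root, dirs, files in os.walk(START_DIR):
    if 'runtime_data' in dirs:
        print(os.path.join(root, 'runtime_data'))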
When I try to type the above-mentioned command, it just throws "Invalid switch".
Will you please mention the command to find the path that needs to go in line 16 of the file "openalpr_video.py"?
Did you follow the steps in this link: https://jkjung-avt.github.io/openalpr-on-tx2/ ?
In step 3 you will find the openalpr directory. After typing cd openalpr, run the command ls and you should find runtime_data.
Hi Mr. @jkjung-avt, I want to crop each vehicle and save it to a path. How can I do that?
@zongoalbert, sounds like you need to use an object detector to detect vehicles (as bounding boxes) and then crop images with the bounding boxes.
You can find the original video here: https://drive.google.com/drive/u/0/folders/10tqL9tIB5AmNJrGXIBPhkOp8-F_paASs?fbclid=IwAR1Ht7Bw5hyva5OaThRtoyqFHgXr02erJFYA9D-GAD2wAHbiXonl4bjWdaY
It's not guaranteed that a 4K camera would produce clear pictures. You could use some photo viewer/editor application to check the original 4K image. Try zooming in on the vehicle of interest and check whether the license plate is clear and readable first.
Yes, Sir, I zoomed in and took a screenshot with the Windows tool, and I launched the command line: alpr -c eu /path/imagescreened.png and it works. I think the problem is about cropping, not the resolution of the video. Could you please add cropping of the rear of the vehicle, or cropping for ALPR, to your code?
Please, Sir, could you take a look at the code?
Cropping images is very simple. Please reference this post on StackOverflow.
https://stackoverflow.com/questions/15589517/how-to-crop-an-image-in-opencv-using-python
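For reference, a minimal sketch of the slicing-based crop that StackOverflow answer describes (the file name and box coordinates are made up; in practice they would come from a vehicle/plate detector's bounding box):

import cv2

img = cv2.imread('frame.png')      # hypothetical input image
x, y, w, h = 100, 200, 300, 150    # top-left corner plus width/height of the box
crop = img[y:y + h, x:x + w]       # NumPy slicing: rows (y) first, then columns (x)
cv2.imwrite('crop.png', crop)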
I tried your installation and guide. I executed all the fixes, but when I run it, it shows an error:
Traceback (most recent call last):
File "test.py", line 66, in
main()
File "test.py", line 52, in main
results = alpr.recognize_ndarray(frame)
AttributeError: Alpr instance has no attribute 'recognize_ndarray'
Can you help me?
Which version of Python are you using?
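If the installed binding turns out to be an older version without recognize_ndarray, one possible workaround (assuming the binding still exposes recognize_array) is to encode the frame in memory and pass the raw bytes:

# Inside the main loop, in place of alpr.recognize_ndarray(frame):
ret, enc = cv2.imencode('.jpg', frame)        # encode the frame to an in-memory JPEG
if ret:
    results = alpr.recognize_array(enc.tobytes())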
OK, sir, I want to do Moroccan License Plate Recognition (LPR) using OpenCV and Tesseract (number, Arabic letters, number). How can I modify your code to get all the characters of the plates?