# test_camera.py
#
# Open an RTSP stream and feed image frames to 'openalpr'
# for real-time license plate recognition.

import sys

import numpy as np
import cv2
from openalpr import Alpr


RTSP_SOURCE = 'rtsp://face:[email protected]:554/live.sdp'
WINDOW_NAME = 'openalpr'
FRAME_SKIP = 15


def open_cam_rtsp(uri, width=1280, height=720, latency=2000):
    """Open the RTSP stream through a GStreamer pipeline (hardware H.264 decode on Jetson TX2)."""
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! '
               'videoconvert ! appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def main():
    alpr = Alpr('tw', 'tx2.conf', '/usr/local/share/openalpr/runtime_data')
    if not alpr.is_loaded():
        print('Error loading OpenALPR')
        sys.exit(1)
    alpr.set_top_n(3)
    #alpr.set_default_region('new')

    cap = open_cam_rtsp(RTSP_SOURCE)
    if not cap.isOpened():
        alpr.unload()
        sys.exit('Failed to open the RTSP stream!')

    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.setWindowTitle(WINDOW_NAME, 'OpenALPR video test')

    _frame_number = 0
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            print('VideoCapture.read() failed. Exiting...')
            break
        _frame_number += 1
        # Only run recognition on every FRAME_SKIP-th frame to keep up with real time.
        if _frame_number % FRAME_SKIP != 0:
            continue
        cv2.imshow(WINDOW_NAME, frame)

        results = alpr.recognize_ndarray(frame)
        for i, plate in enumerate(results['results']):
            best_candidate = plate['candidates'][0]
            print('Plate #{}: {:7s} ({:.2f}%)'.format(
                i, best_candidate['plate'].upper(), best_candidate['confidence']))

        if cv2.waitKey(1) == 27:  # ESC key quits
            break

    cv2.destroyAllWindows()
    cap.release()
    alpr.unload()


if __name__ == "__main__":
    main()
# test_video.py
#
# Open a video input file and feed each image frame to 'openalpr'
# for license plate recognition.

import sys

import numpy as np
import cv2
from openalpr import Alpr


VIDEO_SOURCE = '/home/nvidia/Videos/alpr/2018-03-09-0850.mp4'
WINDOW_NAME = 'openalpr'
FRAME_SKIP = 15


def main():
    alpr = Alpr('tw', 'tx2.conf', '/usr/local/share/openalpr/runtime_data')
    if not alpr.is_loaded():
        print('Error loading OpenALPR')
        sys.exit(1)
    alpr.set_top_n(3)
    #alpr.set_default_region('new')

    cap = cv2.VideoCapture(VIDEO_SOURCE)
    if not cap.isOpened():
        alpr.unload()
        sys.exit('Failed to open video file!')

    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.setWindowTitle(WINDOW_NAME, 'OpenALPR video test')

    _frame_number = 0
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            print('VideoCapture.read() failed. Exiting...')
            break
        _frame_number += 1
        # Only run recognition on every FRAME_SKIP-th frame to speed up processing.
        if _frame_number % FRAME_SKIP != 0:
            continue
        cv2.imshow(WINDOW_NAME, frame)

        results = alpr.recognize_ndarray(frame)
        for i, plate in enumerate(results['results']):
            best_candidate = plate['candidates'][0]
            print('Plate #{}: {:7s} ({:.2f}%)'.format(
                i, best_candidate['plate'].upper(), best_candidate['confidence']))

        if cv2.waitKey(1) == 27:  # ESC key quits
            break

    cv2.destroyAllWindows()
    cap.release()
    alpr.unload()


if __name__ == "__main__":
    main()
; 25-45, 35-55, 45-65, 55-75, 65-85
char_analysis_min_pct = 0.25
char_analysis_height_range = 0.20
char_analysis_height_step_size = 0.10
char_analysis_height_num_steps = 5
segmentation_min_speckle_height_percent = 0.3
segmentation_min_box_width_px = 4
segmentation_min_charheight_percent = 0.5;
segmentation_max_segment_width_percent_vs_average = 1.35;
plate_width_mm = 380.0
plate_height_mm = 160.0
multiline = 0
char_height_mm = 94
char_width_mm = 47
char_whitespace_top_mm = 36
char_whitespace_bot_mm = 26
template_max_width_px = 152
template_max_height_px = 64
; Higher sensitivity means fewer lines
plateline_sensitivity_vertical = 25
plateline_sensitivity_horizontal = 45
; Regions smaller than this will be disqualified
min_plate_size_width_px = 80
min_plate_size_height_px = 35
; Results with fewer or more characters will be discarded
postprocess_min_characters = 4
postprocess_max_characters = 7
ocr_language = ltw
; Override for postprocess letters/numbers regex.
postprocess_regex_letters = [A-Z]
postprocess_regex_numbers = [0-9]
; Whether the plate is always dark letters on light background, light letters on dark background, or both
; value can be either always, never, or auto
invert = auto
; Specify the path to the runtime data directory | |
runtime_dir = /usr/local/share/openalpr/runtime_data | |
ocr_img_size_percent = 1.33333333 | |
state_id_img_size_percent = 2.0 | |
; Calibrating your camera improves detection accuracy in cases where vehicle plates are captured at a steep angle | |
; Use the openalpr-utils-calibrate utility to calibrate your fixed camera to adjust for an angle | |
; Once done, update the prewarp config with the values obtained from the tool | |
;prewarp = | |
;prewarp = planar,1280.000000,720.000000,0.000850,0.000750,0.080000,0.975000,0.815000,0.000000,0.000000 | |
; This is for 0309 photos | |
; prewarp = planar,1280.000000,720.000000,0.000600,0.000900,0.060000,1.000000,1.000000,0.000000,0.000000 | |
; This is for 0313 photos | |
prewarp = planar,1280.000000,720.000000,0.000550,0.000750,0.130000,1.000000,1.000000,0.000000,0.000000 | |
; detection will ignore plates that are too large. This is a good efficiency technique to use if the
; plates are going to be a fixed distance away from the camera (e.g., you will never see plates that fill
; up the entire image).
max_plate_width_percent = 15 | |
max_plate_height_percent = 15 | |
; detection_iteration_increase is the percentage that the LBP frame increases each iteration. | |
; It must be greater than 1.0. A value of 1.01 means increase by 1%, 1.10 increases it by 10% each time. | |
; So a 1% increase would be ~10x slower than 10% to process, but it has a higher chance of landing | |
; directly on the plate and getting a strong detection | |
detection_iteration_increase = 1.05 | |
; The minimum detection strength determines how sure the detection algorithm must be before signaling that | |
; a plate region exists. Technically this corresponds to LBP nearest neighbors (e.g., how many detections | |
; are clustered around the same area). For example, 2 = very lenient, 9 = very strict. | |
detection_strictness = 3 | |
; The detection doesn't necessarily need an extremely high resolution image in order to detect plates | |
; Using a smaller input image should still find the plates and will do it faster | |
; Tweaking the max_detection_input values will resize the input image if it is larger than these sizes | |
; max_detection_input_width/height are specified in pixels | |
max_detection_input_width = 1920 | |
max_detection_input_height = 1080 | |
; detector is the technique used to find license plate regions in an image. Value can be set to | |
; lbpcpu - default LBP-based detector uses the system CPU | |
; lbpgpu - LBP-based detector that uses Nvidia GPU to increase recognition speed. | |
; lbpopencl - LBP-based detector that uses OpenCL GPU to increase recognition speed. Requires OpenCV 3.0 | |
; morphcpu - Experimental detector that detects white rectangles in an image. Does not require training. | |
;detector = lbpcpu | |
detector = lbpgpu | |
; If set to true, all results must match a postprocess text pattern if a pattern is available. | |
; If not, the result is disqualified. | |
must_match_pattern = 1 | |
; Bypasses plate detection. If this is set to 1, the library assumes that each region provided is a likely plate area. | |
skip_detection = 0 | |
; Specifies the full path to an image file that constrains the detection area. Only the plate regions allowed through the mask | |
; will be analyzed. The mask image must match the resolution of your image to be analyzed. The mask is black and white. | |
; Black areas will be ignored, white areas will be searched. An empty value means no mask (scan the entire image) | |
detection_mask_image = | |
; OpenALPR can scan the same image multiple times with different randomization. Setting this to a value larger than | |
; 1 may increase accuracy, but will increase processing time linearly (e.g., analysis_count = 3 is 3x slower) | |
analysis_count = 1 | |
; OpenALPR detects high-contrast plate crops and uses an alternative edge detection technique. Setting this to 0.0 | |
; would classify ALL images as high-contrast, setting it to 1.0 would classify no images as high-contrast. | |
contrast_detection_threshold = 0.9 | |
max_plate_angle_degrees = 15 | |
ocr_min_font_point = 6 | |
; Minimum OCR confidence percent to consider. | |
postprocess_min_confidence = 80 | |
; Any OCR character lower than this will also add an equally likely | |
; chance that the character is incorrect and will be skipped. Value is a confidence percent | |
postprocess_confidence_skip_level = 80 | |
debug_general = 0 | |
debug_timing = 0 | |
debug_detector = 0 | |
debug_prewarp = 0 | |
debug_state_id = 0 | |
debug_plate_lines = 0 | |
debug_plate_corners = 0 | |
debug_char_segment = 0 | |
debug_char_analysis = 0 | |
debug_color_filter = 0 | |
debug_ocr = 0 | |
debug_postprocess = 0 | |
debug_show_images = 0 | |
debug_pause_on_frame = 0 |
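
For reference, the main config shown above is what the two scripts pass to OpenALPR as 'tx2.conf'. Below is a minimal sketch of loading it, assuming the config file sits next to the script and runtime_data lives at the default install path:

# check_alpr_config.py -- hypothetical helper to sanity-check that the config and runtime_data load
from openalpr import Alpr

# 'tw' = country code, 'tx2.conf' = the config shown above,
# third argument = the runtime_data directory found during installation.
alpr = Alpr('tw', 'tx2.conf', '/usr/local/share/openalpr/runtime_data')
if not alpr.is_loaded():
    raise SystemExit('Error loading OpenALPR -- check the config and runtime_data paths')
print('OpenALPR loaded OK')
alpr.unload()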
Did you search for the path correctly? Type this command: find / -path *runtime_data*
and copy the result into line 16 of the file openalpr_video.py.
When I try to type the above-mentioned command, it just throws "Invalid Switch".
Will you please mention the command to find the path that needs to go into line 16 of the file "openalpr_video.py"?
Did you follow the steps in this link: https://jkjung-avt.github.io/openalpr-on-tx2/ ?
In step 3 you will find the openalpr directory. After typing cd openalpr,
run the command ls.
You should find: runtime_data
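If you are unsure which directory you ended up with, a quick check from Python can confirm it before you edit the script (the path below is only an example of a typical install location):

import os

# Example candidate path -- replace with whatever your 'find' or 'ls' turned up
runtime_data = '/usr/local/share/openalpr/runtime_data'
print(os.path.isdir(runtime_data))   # True if the directory exists
print(os.listdir(runtime_data))      # typically contains subfolders such as 'ocr' and 'config'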
Hi @jkjung-avt, I want to crop each vehicle and save it to a path. How can I do that?
@zongoalbert, sounds like you need to use an object detector to detect vehicles (as bounding boxes) and then crop images with the bounding boxes.
you can find the original video here : https://drive.google.com/drive/u/0/folders/10tqL9tIB5AmNJrGXIBPhkOp8-F_paASs?fbclid=IwAR1Ht7Bw5hyva5OaThRtoyqFHgXr02erJFYA9D-GAD2wAHbiXonl4bjWdaY
It's not guaranteed that a 4K camera would produce clear pictures. You could use a photo viewer/editor application to check the original 4K image. Try zooming in on the vehicle of interest and check whether the license plate is clear and readable first.
Yes sir, I did zoom in and take a screenshot with the Windows OS tool, and I ran the command line: alpr -c eu /path/imagescreened.png
and it works. I think the problem is about cropping, not the resolution of the video. Could you please add cropping of the rear of the vehicle (or cropping for ALPR) to your code?
Please, sir, could you take a look at the code?
Cropping images is very simple. Please reference this post on StackOverflow.
https://stackoverflow.com/questions/15589517/how-to-crop-an-image-in-opencv-using-python
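In short, an OpenCV image in Python is just a NumPy array, so cropping is array slicing. A minimal sketch, with the bounding-box coordinates assumed to come from your own vehicle detector:

import cv2

img = cv2.imread('frame.png')        # or a frame grabbed from cv2.VideoCapture
x, y, w, h = 100, 200, 320, 240      # hypothetical bounding box (top-left corner, width, height)
crop = img[y:y+h, x:x+w]             # NumPy slicing: rows (y) first, then columns (x)
cv2.imwrite('vehicle_crop.png', crop)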
I followed your installation guide and executed all the steps, but when I try to run it, it shows an error:
Traceback (most recent call last):
File "test.py", line 66, in
main()
File "test.py", line 52, in main
results = alpr.recognize_ndarray(frame)
AttributeError: Alpr instance has no attribute 'recognize_ndarray'
Can you help me?
Which version of Python are you using?
Hello,
In the above programs, how can we specify the path of runtime_data? What does it mean? Can anyone help me out?