
OpenCV Project in Practice 26: Credit Card Number Recognition and Type Determination

Project Introduction

Credit cards are used more and more often in everyday life. The goal of this project is to automatically recognize a credit card number with image processing techniques and to determine the card type (Visa, MasterCard, and so on) from the first digit of the number. The project combines image preprocessing, contour detection, and template matching to extract the card number from an input image and identify its type.

Project Demo

Credit Card Type: MasterCard
Credit Card #: 5476767898765432

Code and Walkthrough

1. Read the template image and preprocess it

First, read the template image and convert it to grayscale, then threshold it into an inverted binary image. Next, find the contours of the template digits and sort them from left to right.

template_img = cv2.imread(template_path)
ref = cv2.cvtColor(template_img, cv2.COLOR_BGR2GRAY)
ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY_INV)[1]
refCnts, _ = cv2.findContours(ref, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
digits = {}

for (i, c) in enumerate(refCnts):
    (x, y, w, h) = cv2.boundingRect(c)
    roi = ref[y:y + h, x:x + w]
    roi = cv2.resize(roi, (57, 88))
    digits[i] = roi
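
Note on OpenCV versions: cv2.findContours returns three values (image, contours, hierarchy) in OpenCV 3.x but only two (contours, hierarchy) in 4.x, so the two-value unpacking above raises a ValueError on 3.x. A version-agnostic sketch using imutils.grab_contours (imutils is already a dependency of this project) looks like this:

import cv2
import imutils
from imutils import contours

# grab_contours picks the contour list out of whatever tuple findContours returned
refCnts = cv2.findContours(ref, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
refCnts = imutils.grab_contours(refCnts)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]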

2. Read the input image and preprocess it

Read the input image and resize it to a fixed width. Grayscale conversion, binarization, and morphological operations (a top-hat transform and a closing operation) enhance the contrast and bring out the digit regions, while the Sobel operator computes the image gradient so the digit outlines stand out more clearly.

image = cv2.imread(image_path)
width = 300
(h, w) = image.shape[:2]
r = width / float(w)
dim = (width, int(h * r))
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)
gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
gradX = (255 * ((gradX - np.min(gradX)) / (np.max(gradX) - np.min(gradX))))
gradX = gradX.astype("uint8")

gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
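
The snippet above relies on two structuring elements, rectKernel and sqKernel, which are created earlier in the full script with cv2.getStructuringElement; the wide 9x3 rectangle favors horizontally adjacent strokes, which is why the closing step merges the digits of each group into a single blob:

rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))  # wide kernel for the top-hat and first closing
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))    # square kernel for the final closing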

3. Detect the digit contours

Detect contours in the preprocessed image and filter them by aspect ratio and size, keeping only the candidate digit groups.

cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

locs = []
for (i, c) in enumerate(cnts):
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    if ar > 2.5 and ar < 4.0 and (w > 40 and w < 55) and (h > 10 and h < 20):
        locs.append((x, y, w, h))

locs = sorted(locs, key=lambda x: x[0])
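
The aspect-ratio and size thresholds above are tuned for a card resized to a width of 300 pixels; a different input may need different values. A small debug sketch (debug_img is just an illustrative name, not part of the original script) that draws the surviving candidate boxes can make this tuning easier:

debug_img = image.copy()
for (x, y, w, h) in locs:
    # Draw each candidate digit group that passed the aspect-ratio/size filter
    cv2.rectangle(debug_img, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.imshow("Candidate groups", debug_img)
cv2.waitKey(0)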

4. Match against the digit templates

For each detected digit group, match every digit ROI against the template digits and score the match with the correlation coefficient. With cv2.matchTemplate, each digit in the region is identified and the results are concatenated, in order, into the complete card number.

output = []

for (i, (gX, gY, gW, gH)) in enumerate(locs):
    groupOutput = []
    group = gray[gY - 5:gY + gH + 5, gX - 5:gX + gW + 5]
    group = cv2.threshold(group, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    digitCnts, _ = cv2.findContours(group, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]

    for c in digitCnts:
        (x, y, w, h) = cv2.boundingRect(c)
        roi = group[y:y + h, x:x + w]
        roi = cv2.resize(roi, (57, 88))

        scores = []
        for (digit, digitROI) in digits.items():
            result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF)
            (_, score, _, _) = cv2.minMaxLoc(result)
            scores.append(score)

        groupOutput.append(str(np.argmax(scores)))

    cv2.rectangle(image, (gX - 5, gY - 5), (gX + gW + 5, gY + gH + 5), (0, 0, 255), 1)
    cv2.putText(image, "".join(groupOutput), (gX, gY - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
    output.extend(groupOutput)
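
Because every ROI is resized to the same 57x88 size as the templates, the unnormalized cv2.TM_CCOEFF score is enough to pick the best digit with np.argmax. If you prefer scores bounded in [-1, 1], the normalized variant can be swapped in; a minimal sketch of such a helper (match_digit is a hypothetical name, not part of the original code):

def match_digit(roi, digits):
    # Score a 57x88 ROI against every template digit and return the best match as a string
    scores = []
    for digit, digitROI in digits.items():
        result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF_NORMED)
        (_, score, _, _) = cv2.minMaxLoc(result)
        scores.append(score)
    return str(np.argmax(scores))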

5. Determine the card type and display the result

Use the first digit of the recognized card number to determine the card type, then print and display the result.

print(f"Credit Card Type: {FIRST_NUMBER[output[0]]}")
print("Credit Card #: {}".format("".join(output)))
display_image = np.hstack([original_img, image])
cv2.imshow("Image", display_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
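
FIRST_NUMBER used here is the issuer lookup table defined near the top of the full script (and original_img is the untouched copy of the resized input, saved right after resizing); the first digit of the card number selects the issuer:

FIRST_NUMBER = {
    "3": "American Express",
    "4": "Visa",
    "5": "MasterCard",
    "6": "Discover Card"
}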

The complete code is shown below:

import cv2
import numpy as np
from imutils import contours

image_path = r"./images/creditcard_5.png"
template_path = r"reference.png"

# Card type lookup keyed by the first digit of the card number
FIRST_NUMBER = {
    "3": "American Express",
    "4": "Visa",
    "5": "MasterCard",
    "6": "Discover Card"
}

# Read the template image and preprocess it (grayscale + inverted binary threshold)
template_img = cv2.imread(template_path)
ref = cv2.cvtColor(template_img, cv2.COLOR_BGR2GRAY)
ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY_INV)[1]

# Find the template digit contours and sort them from left to right
refCnts, _ = cv2.findContours(ref, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
digits = {}

# Extract each digit ROI, resize it to a fixed 57x88 size, and store it as a template
for (i, c) in enumerate(refCnts):
    (x, y, w, h) = cv2.boundingRect(c)
    roi = ref[y:y + h, x:x + w]
    roi = cv2.resize(roi, (57, 88))
    digits[i] = roi

# Initialize the structuring elements (kernels) for the morphological operations
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

# Read the input image, resize it to a fixed width, and convert it to grayscale
image = cv2.imread(image_path)

width = 300
(h, w) = image.shape[:2]
r = width / float(w)
dim = (width, int(h * r))
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
original_img = image.copy()  # keep a copy of the resized input for the side-by-side display
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Enhance the image: top-hat to highlight the bright digits, then the Sobel gradient in x
tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)
gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
gradX = (255 * ((gradX - np.min(gradX)) / (np.max(gradX) - np.min(gradX))))
gradX = gradX.astype("uint8")

# Connect the digits into groups with a closing operation, then binarize with Otsu's threshold
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)

# Find the contours of the thresholded image; these are the candidate digit groups
cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

locs = []
for (i, c) in enumerate(cnts):
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    # Keep only boxes whose aspect ratio and size match a four-digit group
    if ar > 2.5 and ar < 4.0 and (w > 40 and w < 55) and (h > 10 and h < 20):
        locs.append((x, y, w, h))

# Sort the digit groups from left to right
locs = sorted(locs, key=lambda x: x[0])
output = []

for (i, (gX, gY, gW, gH)) in enumerate(locs):
    groupOutput = []
    # Extract the group from the grayscale image with 5 px of padding and binarize it
    group = gray[gY - 5:gY + gH + 5, gX - 5:gX + gW + 5]
    group = cv2.threshold(group, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # Find and sort the contours of the individual digits inside the group
    digitCnts, _ = cv2.findContours(group, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]

    for c in digitCnts:
        (x, y, w, h) = cv2.boundingRect(c)
        # Extract the digit and resize it to the 57x88 template size
        roi = group[y:y + h, x:x + w]
        roi = cv2.resize(roi, (57, 88))

        # Score the ROI against every template digit and keep the best match
        scores = []
        for (digit, digitROI) in digits.items():
            result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF)
            (_, score, _, _) = cv2.minMaxLoc(result)
            scores.append(score)

        groupOutput.append(str(np.argmax(scores)))

    # Draw the group bounding box and the recognized digits on the output image
    cv2.rectangle(image, (gX - 5, gY - 5), (gX + gW + 5, gY + gH + 5), (0, 0, 255), 1)
    cv2.putText(image, "".join(groupOutput), (gX, gY - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
    output.extend(groupOutput)

print(f"Credit Card Type: {FIRST_NUMBER[output[0]]}")
print("Credit Card #: {}".format("".join(output)))
display_image = np.hstack([origional_img, image])
# cv2.imwrite("test.png", display_image)
cv2.imshow("Image", display_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
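
The script hardcodes image_path and template_path at the top. If you would rather pass them on the command line, an argparse sketch along these lines could replace the two assignments (the argument names are illustrative, not part of the original project):

import argparse

parser = argparse.ArgumentParser(description="Credit card OCR via template matching")
parser.add_argument("-i", "--image", required=True, help="path to the credit card image")
parser.add_argument("-t", "--template", default="reference.png", help="path to the digit template image")
args = parser.parse_args()

image_path = args.image
template_path = args.template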

Project Summary

By combining image processing techniques with a template matching algorithm, this project implements a simple pipeline for credit card number recognition and card type determination.
