当前位置: 首页 > news >正文

前端图像视频实时检测

需求:在目标检测时,我们要求前端能够将后端检测的结果实时渲染在图片或者视频上。图片是静态的,只需要渲染一次;而视频是动态的,播放时需要根据帧数来实时渲染标注框,可以想象视频就是由一张张图片播放的效果。

1.前端技术:SSE(Server-Sent Events)、Canvas

2.模型:yolov11


效果图

图片检测:

视频检测:


步骤1:预览图片和视频,canvas绘制标注框

<!-- Media browser: prev/next paging buttons around the current image or video.
     Each preview element is stacked with a <canvas> overlay on which the
     detection boxes are drawn. -->
<div class="image-list">
  <div v-if="currentMedia" class="btn" @click="prevPage">
    <kp-icon name="LeftOutlined" style="font-size: 30px"></kp-icon>
  </div>
  <div v-if="currentMedia" class="media">
    <div class="workspace">
      <!-- Image preview -->
      <div v-if="isImage" class="media-container">
        <img ref="imgPreviewRef" style="height: 100%; overflow: scroll" alt="" />
        <canvas id="imgCanvasOverlay"></canvas>
      </div>
      <!-- Video preview -->
      <div v-else class="media-container">
        <video id="videoElement" controls height="90%"></video>
        <canvas id="canvasOverlay"></canvas>
      </div>
    </div>
  </div>
  <empty v-else title="暂无图像或视频" />
  <div v-if="currentMedia" class="btn" @click="nextPage">
    <kp-icon name="RightOutlined" style="font-size: 30px"></kp-icon>
  </div>
</div>

步骤2:前端选择sse,在页面加载时与后端建立连接

onMounted(() => {
  // Random id identifying this client; the backend pushes detection results
  // for it over the SSE channel at /sse/<clientId>.
  clientId.value = Math.random().toString(36).substring(2, 15)
  let sseUrl = `${shipDetectUrl}/sse/${clientId.value}`
  sseConnection.value = new EventSource(sseUrl)
  sseConnection.value.addEventListener('message', event => {
    try {
      let data = JSON.parse(event.data)
      // Only react to messages that belong to the media currently on screen.
      if (data && data.filename === currentMedia.value?.fileName) {
        if (!data['is_video']) {
          // Case 1: image. Scale from source-image pixels to the rendered
          // <img> height, collect [x, y, w, h, level, type, cls_res, id]
          // tuples, then draw once.
          let scale = data.height / imgPreviewRef.value.getBoundingClientRect().height
          // forEach, not map: only side effects here — the mapped array was
          // never used.
          data.data.forEach((item, index) => {
            imgAnnotation.value.push([
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ])
          })
          drawImgAnnotations()
        } else {
          // Case 2: video. Check the end flag first — previously a message
          // carrying video_end while the state was still 0 was consumed by
          // the "detection started" branch and state 2 was never reached.
          if (data['video_end']) {
            videoCheckState.value = 2
          } else if (videoCheckState.value === 0) {
            videoCheckState.value = 1
          }
          frameRate.value = data.fps
          // Scale from source-video pixels to the rendered <video> height.
          let scale = data.height / video.value.getBoundingClientRect().height
          let annotationData = data.data.map((item, index) => {
            return [
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ]
          })
          // Boxes are indexed by frame number so the draw loop can look them
          // up from the current playback time.
          annotations.set(data.frame, annotationData)
        }
      } else if (data['batch_end']) {
        // Case 3: batch detection finished.
        batchLoading.value = false
        checkState.value = 2
        message.success('已完成批量检测!')
        if (!isImage.value && video.value.paused) video.value.play()
        handleDetection()
      }
    } catch (error) {
      console.error('Error parsing SSE data:', error)
    }
  })
  sseConnection.value.addEventListener('error', error => {
    console.error('SSE connection error:', error)
  })
})

步骤3:图片或者视频加载完成,立即调用检测接口,检测结果在sse连接中返回

// Show the current image and kick off detection once it has loaded.
function previewImg() {
  nextTick(() => {
    const overlay = document.getElementById('imgCanvasOverlay')
    imageCanvas.value = overlay
    if (overlay) {
      imageCanvasCtx.value = overlay.getContext('2d')
    }
    const img = imgPreviewRef.value
    img.src = `/_api/detectionDataset/preview?filePath=${currentMedia.value.filePath.replaceAll('\\', '/')}`
    img.onload = () => {
      // Fresh image: drop stale boxes and selection, fit the overlay canvas
      // to the rendered image, then ask the backend to detect.
      imgAnnotation.value = []
      clickedBoxId.value = null
      imageCanvas.value.width = img.width
      imageCanvas.value.height = img.height
      handleDetection()
    }
    img.addEventListener('click', handleCanvasClick)
  })
}
const isFirst = ref(false)
const isRePlaying = ref(false) // whether playback is resuming after a pause
// 预览视频
// Show the current video; wire playback events so backend detection starts,
// pauses and resumes together with the player.
function previewVideo() {
  annotations.clear()
  nextTick(() => {
    video.value = document.getElementById('videoElement')
    videoCanvas.value = document.getElementById('canvasOverlay')
    if (videoCanvas.value) {
      videoCanvasCtx.value = videoCanvas.value.getContext('2d')
    }
    if (!video.value || !currentMedia.value) return
    video.value.src = currentMedia.value.src
    // Use on<event> properties instead of addEventListener: previewVideo runs
    // once per video, and the anonymous listeners previously registered here
    // accumulated across videos (every event fired all stale handlers too).
    video.value.oncanplay = () => {
      // First playable moment of a freshly loaded video: start detection at
      // half speed and begin the draw loop.
      if (!isFirst.value && video.value.currentTime === 0) {
        handleDetection()
        video.value.playbackRate = 0.5
        video.value.play()
        animationLoop() // start the per-frame draw loop
        isFirst.value = true
      }
    }
    video.value.onplay = () => {
      isFirst.value = false
      clickedBoxId.value = null
    }
    video.value.onpause = () => {
      // A genuine user pause stops backend detection; the `playing` handler
      // below restarts it on resume.
      if (!isRePlaying.value) {
        stopVideoDetection()
      }
      isRePlaying.value = true
    }
    video.value.onplaying = () => {
      // Resumed after a pause: re-request detection from the current frame.
      if (video.value.currentTime !== 0 && isRePlaying.value) {
        handleDetection()
      }
      isRePlaying.value = false
    }
    video.value.onloadedmetadata = resizeCanvas
    // Named handler: remove-then-add keeps exactly one window registration.
    window.removeEventListener('resize', resizeCanvas)
    window.addEventListener('resize', resizeCanvas)
    video.value.addEventListener('click', handleCanvasClick) // same fn ref → deduped by the browser
  })
}
// Call the detection API for the current media; for videos, start from the
// frame matching the current playback position (frame = time * fps).
function handleDetection() {
  let param = {
    source: currentMedia.value.filePath,
    model: modelType.value, // model type
    client_id: clientId.value,
  }
  if (!isImage.value) {
    param.frame = Math.floor(video.value.currentTime * frameRate.value)
  }
  localImageVideo
    .startDetect(param)
    .then(res => {
      taskId.value = res.data.task_id
    })
    .catch(error => {
      // Surface failures instead of silently dropping the rejected promise.
      console.error('startDetect failed:', error)
    })
}

步骤4:图片绘制一次标注框;视频则按帧绘制标注框,因为返回结果是以帧数为单位(帧数 = 当前时间 * 帧率)

// Draw one detection box plus its label on the given 2D context.
// `box` is [x, y, w, h, level, type, cls_res, id] in canvas pixels.
function drawBox(ctx, box) {
  const [x1, y1, width, height, , type, , id] = box
  const color = clickedBoxId.value === id ? 'red' : 'yellow'
  ctx.beginPath()
  ctx.strokeStyle = color
  ctx.lineWidth = 2
  ctx.strokeRect(x1, y1, width, height)
  ctx.font = '20px Arial'
  ctx.fillStyle = color
  ctx.fillText(type.toString(), x1, y1 - 5) // label 5px above the box top
}
// Draw the annotation boxes for the image (a single static pass).
function drawImgAnnotations() {
  if (imageCanvasCtx.value)
    imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)
  detectionData.value = []
  imgAnnotation.value.forEach(item => {
    const [, , , , level, type, cls_res, id] = item
    detectionData.value.push({ type: type, level: level, children: cls_res, id })
    drawBox(imageCanvasCtx.value, item)
  })
  // Push the result rows once per redraw — previously getData() was invoked
  // inside the loop, once per box.
  if (detectionData.value.length) detectionResultRef.value.getData(detectionData.value)
}
// Draw the annotation boxes for the current video frame.
function drawAnnotations() {
  if (!video.value) return
  videoCanvasCtx.value?.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)
  if (video.value.paused) return
  const currentFrame = Math.round(video.value.currentTime * frameRate.value)
  // Look up the boxes for this frame; frame 0 falls back to frame 1 and
  // frames past the received range reuse the last batch.
  // NOTE(review): using annotations.size as the max frame assumes frames
  // arrive densely from 1..N — confirm against the backend protocol.
  let boxes = annotations.get(currentFrame)
  if (currentFrame === 0) {
    boxes = annotations.get(1)
  } else if (currentFrame > annotations.size) {
    boxes = annotations.get(annotations.size)
  }
  detectionData.value = []
  if (boxes?.length === 0) return // canvas was already cleared above
  boxes?.forEach(box => {
    const [, , , , level, type, cls_res, id] = box
    detectionData.value.push({ type: type, level: level, children: cls_res, id })
    drawBox(videoCanvasCtx.value, box)
  })
  if (detectionData.value.length) detectionResultRef.value.getData(detectionData.value)
}
// High-precision per-frame redraw via requestAnimationFrame.
function animationLoop() {
  drawAnnotations()
  requestAnimationFrame(animationLoop)
}

步骤5:点击标注框可查看目标细节

<!-- Container showing a zoomed-in crop of the clicked detection target -->
<div class="detail">
  <div class="detail-title">细节</div>
  <div class="magnifier-glass"><img id="croppedPreview" /></div>
</div>
<style lang="scss" scoped>
/* Fixed-width side panel; the crop preview scrolls when it overflows. */
.detail {
  width: 300px;
  background: #fff;
  padding: 10px;
  .magnifier-glass {
    padding: 10px 0;
    width: calc(100% - 20px);
    overflow: scroll;
    #croppedPreview {
      object-fit: contain;
      border: 2px solid #fff;
    }
  }
}
</style>
// Handle a click on the preview: hit-test the detection boxes at the click
// point and, when one is hit, highlight it and show its cropped detail view.
function handleCanvasClick(event) {
  const rect = isImage.value
    ? imageCanvas.value.getBoundingClientRect()
    : videoCanvas.value.getBoundingClientRect()
  const clickX = event.clientX - rect.left
  const clickY = event.clientY - rect.top
  // Point-in-rect test for a box tuple [x, y, w, h, ...].
  const hitTest = box => {
    const [x1, y1, width, height] = box
    return clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height
  }
  let clickedBox = null
  if (isImage.value) {
    clickedBox = imgAnnotation.value.find(hitTest)
  } else {
    // Video: fetch the box list for the frame currently being shown.
    const currentFrame = Math.floor(video.value.currentTime * frameRate.value)
    let boxes = annotations.get(currentFrame)
    if (currentFrame === 0) {
      boxes = annotations.get(1)
    } else if (currentFrame > annotations.size) {
      boxes = annotations.get(annotations.size)
    }
    if (!boxes) return
    clickedBox = boxes.find(hitTest)
  }
  if (!clickedBox) return
  event.preventDefault()
  detectionResultRef.value.selectResult(clickedBox)
  captureBoxArea(clickedBox)
  clickedBoxId.value = clickedBox[clickedBox.length - 1]
  // Redraw so the selected box switches to the highlight colour.
  if (isImage.value) {
    drawImgAnnotations()
  } else {
    drawAnnotations()
  }
}
// Crop the clicked box out of the current image/video frame and show it in
// the #croppedPreview detail panel.
function captureBoxArea(box) {
  const source = isImage.value ? imgPreviewRef.value : video.value
  const sourceW = source.getBoundingClientRect().width
  const sourceH = source.getBoundingClientRect().height
  // Scratch canvas at the element's displayed size, so box coordinates
  // (display pixels) index it directly.
  const scratch = document.createElement('canvas')
  scratch.width = sourceW
  scratch.height = sourceH
  const scratchCtx = scratch.getContext('2d')
  scratchCtx.drawImage(source, 0, 0, sourceW, sourceH)
  const [x1, y1, width, height] = box
  const imageData = scratchCtx.getImageData(x1, y1, width, height)
  const cropCanvas = document.createElement('canvas')
  cropCanvas.width = width
  cropCanvas.height = height
  cropCanvas.getContext('2d').putImageData(imageData, 0, 0)
  const croppedPreview = document.getElementById('croppedPreview')
  croppedPreview.src = cropCanvas.toDataURL()
  croppedPreview.style.display = 'block'
}

完整代码

<template><div class="analysis"><div class="analysis-top"><div class="preview-wrap"><div class="top-btns"><div v-if="checkState !== 1" class="left-btn"><a-button type="primary" @click="handleDataset">选择数据集</a-button><a-button type="primary" style="margin-left: 10px" @click="handleUpload">本地上传</a-button></div><div v-if="currentMedia" class="name">{{ currentMedia.fileName }}</div><div v-if="currentMedia" class="right-btn"><!--数量--><span class="num">{{ activeMediaIndex + 1 }} / {{ mediaList.length }}</span><span class="refresh" @click="changeLoad(datasetId)"><kp-icon name="icon_shuaxin"></kp-icon></span><span>模型类型:<a-selectv-model:value="modelType":options="modelTypeOptions"style="width: 80px"@change="reset"></a-select></span><!--检测按钮--><!-- <a-button class="btn" @click="handleDetection">检测</a-button> --><a-buttonv-if="!isImage && videoCheckState === 1"class="btn"@click="stopVideoDetection">暂停检测</a-button><a-button :loading="batchLoading" class="btn" @click="handleBatchDetection">批量检测</a-button></div></div><div class="image-list"><div v-if="currentMedia" class="btn" @click="prevPage"><kp-icon name="LeftOutlined" style="font-size: 30px"></kp-icon></div><div v-if="currentMedia" class="media"><div class="workspace"><div v-if="isImage" class="media-container"><img ref="imgPreviewRef" style="height: 100%; overflow: scroll" alt="" /><canvas id="imgCanvasOverlay"></canvas></div><!--视频预览--><div v-else class="media-container"><video id="videoElement" controls height="90%"></video><canvas id="canvasOverlay"></canvas></div></div></div><empty v-else title="暂无图像或视频" /><div v-if="currentMedia" class="btn" @click="nextPage"><kp-icon name="RightOutlined" style="font-size: 30px"></kp-icon></div></div></div></div><div class="analysis-bottom"><div class="result"><detection-resultref="detectionResultRef":current-media="currentMedia"@select-row="selectRow"></detection-result></div><div class="detail"><div class="detail-title">细节</div><div class="magnifier-glass"><img id="croppedPreview" 
/></div></div></div></div><select-dataset ref="selectDatasetRef" @handle-img-and-video="handleImgAndVideo"></select-dataset><local-upload ref="localUploadRef" @change-load="changeLoad"></local-upload>
</template><script setup lang="ts">
import '@kunpeng/layout/default/vgg.css'
import '@kunpeng/layout/default/vgg3.css'
import DetectionResult from './components/detectionResult.vue'
import SelectDataset from './components/selectDataset.vue'
import LocalUpload from './components/localUpload.vue'
import empty from '@/components/empty/index.vue'
import annotated from '@kunpeng/api/dataset-annotation/annotated'
import localImageVideo from '@/api/localImageVideo'
import { message } from 'ant-design-vue'
import { ref } from 'vue'const $store = inject('$store')
const baseApi = $store.$appStore.baseApi
const userInfo = $store.$userStore.userInfoconst resultZoomInImg = ref<string>({}) // 结果放大影像数据
const sourceCanvas = ref<HTMLCanvasElement | null>(null)
const isRunning = ref(false) // whether the video is currently playing
const mediaList = ref([]) // list of images/videos in the dataset
const currentMedia = ref(null) // the image or video currently on screen
const selectDatasetRef = ref()
const localUploadRef = ref()
const openImage = ref(() => {})
const openVideo = ref(() => {})
const handleAnnotated = ref(() => {})
const handleImgDetection = ref(() => {}) // detect a single image
const activeMediaIndex = ref(0)
const currentStreamJson = ref('') // latest annotation payload received over SSE
const imgAnnotation = ref([]) // image detection results
const detectionData = ref([]) // rows pushed to the detection-result table
const batchLoading = ref(false) // loading state of the batch-detect button
const checkState = ref(0) // batch detection state: 0 idle, 1 running, 2 done
const videoCheckState = ref(0) // video detection state: 0 idle, 1 running, 2 done
// attributes json
const attributes_json = ref({
  region: {},
})
const labelJson = ref([])
const fileList = ref([])
const allRegionList = ref([])
const videoInterval = ref(null)
// Media is treated as an image when its extension is in this list.
const isImage = computed(() => {
  return ['png', 'jpg', 'bmp', 'jpeg', 'webp'].includes(currentMedia.value?.ext)
})
const imgPreviewRef = ref(null) // <img> preview element
const imageCanvas = ref(null) // annotation canvas stacked over the image
const imageCanvasCtx = ref(null)
const video = ref(null) // <video> element
const videoCanvas = ref(null) // annotation canvas stacked over the video
const videoCanvasCtx = ref(null)
let annotations = new Map() // video annotations keyed by frame number (see annotations.set(data.frame, …))
const frameRate = ref(25) // video frame rate (fps); overwritten by SSE data.fps
const detectionResultRef = ref(null)
const isCustomButton = ref(false)
const clickedBoxId = ref(null) // id of the currently highlighted box
const modelType = ref(0)
const modelTypeOptions = window.config.modelTypeOptions
const datasetId = ref('')
const loading = ref(false) // loading state of the detect button
const taskId = ref('') // id of the running detection task
const clientId = ref('') // SSE client id
const sseConnection = ref(null)
const shipDetectUrl = window.config.shipDetectUrlonMounted(() => {clientId.value = Math.random().toString(36).substring(2, 15)console.log(clientId.value)let sseUrl = `${shipDetectUrl}/sse/${clientId.value}`sseConnection.value = new EventSource(sseUrl)// Removed the nextTick block since we're now using @loadedmetadata directlysseConnection.value.addEventListener('message', event => {try {let data = JSON.parse(event.data)if (data && data.filename === currentMedia.value?.fileName) {// 情况一:图片if (!data['is_video']) {let scale = data.height / imgPreviewRef.value.getBoundingClientRect().height // 缩放比例data.data.map((item, index) => {imgAnnotation.value.push([item.det_res[0] / scale,item.det_res[1] / scale,(item.det_res[2] - item.det_res[0]) / scale,(item.det_res[3] - item.det_res[1]) / scale,item.det_res[4],item.det_res[5],item.cls_res,index,])})drawImgAnnotations()} else if (data['is_video']) {console.log('sse消息', data)// 情况二:视频// 收到消息开始播放视频if (videoCheckState.value === 0 && video.value.paused) {videoCheckState.value = 1} else if (data['video_end']) {videoCheckState.value = 2}frameRate.value = data.fpslet scale = data.height / video.value.getBoundingClientRect().height // 缩放比例let annotationData = data.data.map((item, index) => {return [item.det_res[0] / scale,item.det_res[1] / scale,(item.det_res[2] - item.det_res[0]) / scale,(item.det_res[3] - item.det_res[1]) / scale,item.det_res[4],item.det_res[5],item.cls_res,index,]})annotations.set(data.frame, annotationData)console.log('frame', data.frame)console.log('annotationData', annotationData)console.log('annotations', annotations)}} else if (data['batch_end']) {batchLoading.value = false// 情况三:批量检测checkState.value = 2message.success('已完成批量检测!')if (!isImage.value && video.value.paused) video.value.play()handleDetection()}} catch (error) {console.error('Error parsing SSE data:', error)}})sseConnection.value.addEventListener('error', error => {console.error('SSE connection error:', error)})
})
onUnmounted(() => {clearInterval(videoInterval.value)
})// 检测按钮点击事件
async function handleDetection() {let param = {source: currentMedia.value.filePath,model: modelType.value, // 模型client_id: clientId.value,}if (!isImage.value) {param.frame = Math.floor(video.value.currentTime * frameRate.value)}await localImageVideo.startDetect(param).then(res => {taskId.value = res.data.task_id})
}// 批量检测按钮点击事件
function handleBatchDetection() {checkState.value = 1message.info('批量检测中')batchLoading.value = truelet lastIndex = currentMedia.value.filePath.lastIndexOf('/')let param = {source: currentMedia.value.filePath.substring(0, lastIndex),model: modelType.value, // 模型client_id: clientId.value,}localImageVideo.startDetect(param)
}// 预览图像
function previewImg() {// imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)nextTick(() => {imageCanvas.value = document.getElementById('imgCanvasOverlay')if (imageCanvas.value) {imageCanvasCtx.value = imageCanvas.value.getContext('2d')}imgPreviewRef.value.src = `/_api/detectionDataset/preview?filePath=${currentMedia.value.filePath.replaceAll('\\', '/')}`imgPreviewRef.value.onload = () => {imgAnnotation.value = []clickedBoxId.value = nullimageCanvas.value.width = imgPreviewRef.value.widthimageCanvas.value.height = imgPreviewRef.value.height// 如果批量检测结束,则调检测接口handleDetection()}imgPreviewRef.value.addEventListener('click', handleCanvasClick)})
}const isFirst = ref(false)
const isSeeking = ref(false) // whether the user is dragging the progress bar
const debounceTimer = ref(null) // debounce timer handle
const isRePlaying = ref(false) // whether playback is resuming after a pause
// Preview video
function previewVideo() {annotations.clear()nextTick(() => {video.value = document.getElementById('videoElement')videoCanvas.value = document.getElementById('canvasOverlay')if (videoCanvas.value) {videoCanvasCtx.value = videoCanvas.value.getContext('2d')}if (video.value && currentMedia.value) {video.value.src = currentMedia.value.srcvideo.value.addEventListener('canplay', () => {if (!isFirst.value && video.value.currentTime <= 0.1) {handleDetection()video.value.playbackRate = 0.5video.value.play()animationLoop() // 开始绘制循环console.log('触发检测1111111111')isFirst.value = true}})video.value.addEventListener('play', () => {console.log('play开始播放')isFirst.value = falseclickedBoxId.value = null// animationLoop() // 开始绘制循环})video.value.addEventListener('pause', () => {if (!isRePlaying.value) {console.log('pause暂停播放')stopVideoDetection()}isRePlaying.value = true})video.value.addEventListener('playing', () => {console.log('暂停后重新开始播放')if (video.value.currentTime !== 0 && isRePlaying.value) {console.log('暂停后重新开始播放==只调用一次')console.log('触发检测222222222222222222')handleDetection()}isRePlaying.value = false})video.value.addEventListener('loadedmetadata', resizeCanvas)window.addEventListener('resize', resizeCanvas)video.value.addEventListener('click', handleCanvasClick)}})
}function handleCanvasClick(event) {// 获取点击坐标let rect = isImage.value? imageCanvas.value.getBoundingClientRect(): videoCanvas.value.getBoundingClientRect()let clickedBox = nullconst clickX = event.clientX - rect.leftconst clickY = event.clientY - rect.top// 图片if (isImage.value) {// 查找被点击的框clickedBox = imgAnnotation.value.find(box => {const [x1, y1, width, height, level, type, cls_res] = boxreturn clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height})} else {// 视频// 获取当前帧数据const currentFrame = Math.floor(video.value.currentTime * frameRate.value)let boxes = []// 查找当前时间对应的标注boxes = annotations.get(currentFrame)if (currentFrame === 0) {boxes = annotations.get(1)} else if (currentFrame > annotations.size) {boxes = annotations.get(annotations.size)}if (!boxes) return// 查找被点击的框clickedBox = boxes.find(box => {const [x1, y1, width, height, level, type, cls_res] = boxreturn clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height})}if (clickedBox) {event.preventDefault()detectionResultRef.value.selectResult(clickedBox)captureBoxArea(clickedBox)clickedBoxId.value = clickedBox[clickedBox.length - 1]// 重新绘制标注框if (isImage.value) {drawImgAnnotations()} else {drawAnnotations()}}
}function selectRow(data) {let boxes = []if (isImage.value) {boxes = imgAnnotation.value} else {// 获取当前帧数据// video.value.currentTime   这个是视频当前播放的时间 okconst currentFrame = Math.floor(video.value.currentTime * frameRate.value)// 查找当前时间对应的标注boxes = annotations.get(currentFrame)if (currentFrame === 0) {boxes = annotations.get(1)} else if (currentFrame > annotations.size) {boxes = annotations.get(annotations.size)}if (!boxes) return}// 查找被点击的框const clickedBox = boxes.find(box => {return data.id === box[box.length - 1]})clickedBoxId.value = clickedBox[clickedBox.length - 1]if (isImage.value) {drawImgAnnotations()} else {drawAnnotations()}captureBoxArea(clickedBox)
}function captureBoxArea(box) {// 创建临时canvasconst tempCanvas = document.createElement('canvas')const tempCtx = tempCanvas.getContext('2d')// 设置临时canvas尺寸为实际视频尺寸let dom = isImage.value ? imgPreviewRef.value : video.valueconst domWidth = dom.getBoundingClientRect().widthconst domHeight = dom.getBoundingClientRect().heighttempCanvas.width = domWidthtempCanvas.height = domHeight// 绘制当前视频帧tempCtx.drawImage(dom, 0, 0, domWidth, domHeight)// 计算实际像素坐标const [x1, y1, width, height, level, type, cls_res] = box// 截取区域const imageData = tempCtx.getImageData(x1, y1, width, height)// 创建新canvas处理图像const croppedCanvas = document.createElement('canvas')croppedCanvas.width = widthcroppedCanvas.height = heightcroppedCanvas.getContext('2d').putImageData(imageData, 0, 0)// 显示预览const croppedPreview = document.getElementById('croppedPreview')croppedPreview.src = croppedCanvas.toDataURL()croppedPreview.style.display = 'block'
}// 高精度帧监听
function animationLoop() {drawAnnotations()requestAnimationFrame(animationLoop)
}// 调整Canvas尺寸以匹配视频
function resizeCanvas() {videoCanvas.value.width = video.value.offsetWidthvideoCanvas.value.height = video.value.offsetHeight
}// 视频暂停检测
function stopVideoDetection() {let data = {task_id: taskId.value,}localImageVideo.pauseDetect(data).then(res => {if (res.status === 200) {videoCheckState.value = 0video.value?.pause()}})
}// 绘制图片的标注框
function drawImgAnnotations() {if (imageCanvasCtx.value)imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)detectionData.value = []imgAnnotation.value.forEach(item => {const [x1, y1, width, height, level, type, cls_res, id] = itemdetectionData.value.push({type: type,level: level,children: cls_res,id,})detectionResultRef.value.getData(detectionData.value)console.log(clickedBoxId.value, id)// 绘制矩形imageCanvasCtx.value.beginPath()imageCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'imageCanvasCtx.value.lineWidth = 2imageCanvasCtx.value.strokeRect(x1, y1, width, height)// 绘制 level 标签imageCanvasCtx.value.font = '20px Arial'imageCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'const labelText = type.toString()const labelX = x1const labelY = y1 - 5 // 调整标签的垂直位置,使其位于矩形上方imageCanvasCtx.value.fillText(labelText, labelX, labelY)})
}// 绘制视频的标注框
function drawAnnotations() {if (video.value) {videoCanvasCtx.value?.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)if (video.value.paused) returnconst currentFrame = Math.round(video.value.currentTime * frameRate.value)console.log('currentTime', video.value.currentTime)console.log('currentFrame', currentFrame)let boxes = []// 查找当前时间对应的标注boxes = annotations.get(currentFrame)if (currentFrame === 0) {boxes = annotations.get(1)} else if (currentFrame > annotations.size) {boxes = annotations.get(annotations.size)}detectionData.value = []if (boxes?.length === 0) {videoCanvasCtx.value.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)return}console.log('boxes', boxes)// 绘制每个框boxes?.forEach(box => {// 解构坐标(归一化值)const [x1, y1, width, height, level, type, cls_res, id] = boxdetectionData.value.push({type: type,level: level,children: cls_res,id,})detectionResultRef.value.getData(detectionData.value)// 绘制矩形videoCanvasCtx.value.beginPath()videoCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'videoCanvasCtx.value.lineWidth = 2videoCanvasCtx.value.strokeRect(x1, y1, width, height)// 绘制 level 标签videoCanvasCtx.value.font = '20px Arial'videoCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'const labelText = type.toString()const labelX = x1const labelY = y1 - 5 // 调整标签的垂直位置,使其位于矩形上方videoCanvasCtx.value.fillText(labelText, labelX, labelY)})}
}async function getMediaList(datasetId, type) {let param = {page: {limit: -1,page: -1,total: 0,},querys: [{group: 'advance',operation: 'EQUAL',property: 'dataset_id',relation: 'AND',value: datasetId,},],}let res = await localImageVideo.getImgList(param)mediaList.value = res.data.listif (mediaList.value && mediaList.value.length > 0) {mediaList.value.forEach(item => {let random = Math.ceil(Math.random() * 100000)item.src = `/_api/detectionDataset/preview?time=${random}&&filePath=${item.filePath.replaceAll('\\', '/')}`})}if (type == 'localupload') {activeMediaIndex.value = 0currentMedia.value = mediaList.value[0]console.log('currentMedia', currentMedia.value)} else {activeMediaIndex.value = mediaList.value.findIndex(item => item.id === currentMedia.value.id)}if (isImage.value) {previewImg()} else {previewVideo()}reset()
}
// A dataset was picked in the selector dialog.
// NOTE(review): checkedList is typed string[] but element 0 is assigned to
// currentMedia, which is used elsewhere as a media object (.id/.filePath) —
// confirm the real element type with the selectDataset component.
async function handleImgAndVideo(datasetId: string, checkedList: string[]) {
  checkState.value = 0
  currentMedia.value = checkedList[0]
  getMediaList(datasetId, 'selectDataset')
}
// A dataset was uploaded from local files.
function changeLoad(id) {
  datasetId.value = id
  checkState.value = 0
  getMediaList(id, 'localupload')
}
// Reset data
function reset() {detectionData.value = []detectionResultRef.value.getData(detectionData.value)// 清除细节画布const croppedPreview = document.getElementById('croppedPreview')croppedPreview.src = ''if (isImage.value) {imgAnnotation.value = []drawImgAnnotations()} else {annotations.clear()drawAnnotations()}
}function handleDataset() {if (!isImage.value) {stopVideoDetection()annotations.clear()}selectDatasetRef.value.open()
}// 本地上传
function handleUpload() {if (!isImage.value) {stopVideoDetection()annotations.clear()}localUploadRef.value.open()
}// 上一个
async function prevPage() {if (!isImage.value) {console.log('sp')stopVideoDetection()annotations.clear()}if (activeMediaIndex.value > 0) {activeMediaIndex.value--currentMedia.value = mediaList.value[activeMediaIndex.value]// 上一个为图片if (isImage.value) {previewImg()} else {// 上一个为视频previewVideo()}reset()} else {message.warning('此为第一个影像')}
}// 下一个
async function nextPage() {if (!isImage.value) {console.log('sp')stopVideoDetection()annotations.clear()}if (activeMediaIndex.value < mediaList.value.length - 1) {activeMediaIndex.value++currentMedia.value = mediaList.value[activeMediaIndex.value]reset()// 下一个为图片if (isImage.value) {previewImg()} else {// 下一个为视频previewVideo()}} else {message.warning('此为最后一个影像')}
}
</script><style scoped lang="scss">
.analysis {width: 100%;height: 100%;background: #f3f5fb;.analysis-top {height: 60%;display: flex;.preview-wrap {//width: calc(100% - 300px);width: 100%;background: #fff;//margin-right: 10px;.top-btns {width: 100%;height: 50px;padding: 10px 20px;position: relative;display: flex;justify-content: space-between;align-items: center;.right-btn {.refresh {margin: 0 20px 0 10px;}.btn {margin-left: 10px;}}.name {font-weight: 600;font-size: 16px;}}.image-list {width: 100%;height: calc(100% - 52px);display: flex;justify-content: space-between;align-items: center;.media {width: calc(100% - 60px);height: 100%;position: relative;}.workspace {width: 100%;height: 100%;display: flex;.media-container {width: 100%;height: 100%;position: relative;display: inline-block;img,video {position: absolute;left: 50%;transform: translateX(-50%);}#imgCanvasOverlay,#canvasOverlay {position: absolute;top: 0;// left: 0;left: 50%;transform: translateX(-50%);pointer-events: none; /* 确保canvas不阻挡视频操作 */z-index: 99;}}}.btn {height: 100%;line-height: calc(100% - 50px);width: 60px;display: flex;justify-content: center;align-items: center;// background: #ccc;}}}}.analysis-bottom {height: calc(40% - 10px);display: flex;margin-top: 10px;.result {width: calc(100% - 300px);height: 100%;background: #fff;margin-right: 10px;}.detail {width: 300px;background: #fff;padding: 10px;.magnifier-glass {padding: 10px 0;width: calc(100% - 20px);overflow: scroll;#croppedPreview {object-fit: contain;border: 2px solid #fff;}}}}
}
video::-webkit-media-controls-fullscreen-button {display: none;
}
</style>

http://www.dtcms.com/a/290656.html

相关文章:

  • AJAX 概念与 axios 使用
  • AI探索 | 基于 Node.js 开发 MCP 客户端+服务端及优秀项目分享
  • 【华为机试】240. 搜索二维矩阵 II
  • Node.js- node管理工具nvm
  • Git上传与下载GitHub仓库
  • 新手向:基于Python的快捷启动器(本地应用/文件秒开工具)
  • 本地项目提交到git教程
  • 代码随想录算法训练营二十二天|回溯part04
  • 第十八节:第八部分:java高级:动态代理设计模式介绍、准备工作、代码实现
  • 【设计模式C#】简单工厂模式(用于简化获取对象实例化的复杂性)
  • Spring Boot注解详解
  • PDF 表单字段属性详解
  • 泛型:C#中的类型抽象艺术
  • 三款适合户外探险、应急救援的智能三防手机,各有各的优势
  • kafka 日志索引 AbstractIndex
  • Elasticsearch X-Pack安全功能未启用的解决方案
  • 模型系列(篇一)-Bert
  • 暑期算法训练.5
  • 分布在内侧内嗅皮层(MEC)的带状细胞对NLP中的深层语义分析有什么积极的影响和启示
  • [硬件电路-64]:模拟器件 -二极管在稳压电路中的应用
  • Facebook 开源多季节性时间序列数据预测工具:Prophet 乘性季节性 Multiplicative Seasonality
  • JS实现矩阵左右旋转90度
  • uniapp app pdf.js报错:Uncaught SyntaxError:Unexpected token ‘{‘
  • 5道挑战题writup
  • 单体VS微服务:如何选择最适合的架构?
  • 人工智能之数学基础:事件间的关系
  • Leetcode力扣解题记录--第189题(巧思数组翻转)
  • 【MySQL】Linux配置MySQL Windows远程连接
  • 客流分析核心算法 trajectory_event_analyzer数据结构
  • Python-数据库概念-pymysql-元编程-SQLAlchemy-学习笔记