手机可视化方案(针对浓度识别)
手机拍照后直接识别的解决方案
要实现手机拍照后直接进行识别,有几种不同的方法,具体取决于您的技术栈和需求。以下是几种可行的方案:
方案一:开发移动端应用(推荐)
开发一个移动端应用,可以直接在手机上拍照并进行识别:
iOS 方案(使用 Swift + Core ML)
import UIKit
import CoreML
import Vision

/// Camera-capture screen: lets the user take a photo and classifies the
/// concentration with a bundled Core ML model through the Vision framework.
class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var resultLabel: UILabel!

    /// Vision wrapper around the generated Core ML model. Stays nil when
    /// loading fails in `viewDidLoad`; `processImage(_:)` guards against that.
    var model: VNCoreMLModel!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Load the model once up front; on failure we log and leave `model` nil.
        do {
            let configuration = MLModelConfiguration()
            let coreMLModel = try two_try_rgb_concentration_model(configuration: configuration)
            model = try VNCoreMLModel(for: coreMLModel.model)
        } catch {
            print("加载模型失败: \(error)")
        }
    }

    @IBAction func takePhoto(_ sender: UIButton) {
        let picker = UIImagePickerController()
        picker.delegate = self
        picker.sourceType = .camera
        present(picker, animated: true)
    }

    func imagePickerController(_ picker: UIImagePickerController,
                               didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
        picker.dismiss(animated: true)
        guard let image = info[.originalImage] as? UIImage else {
            return
        }
        imageView.image = image
        processImage(image)
    }

    /// Runs the classification request off the main queue and publishes the
    /// top result to `resultLabel` back on the main queue.
    func processImage(_ image: UIImage) {
        // Bug fix: optional-bind the implicitly unwrapped `model` so a failed
        // model load no longer crashes here on first use.
        guard let model = model else {
            print("模型未加载,跳过识别")
            return
        }
        guard let ciImage = CIImage(image: image) else {
            return
        }
        let request = VNCoreMLRequest(model: model) { [weak self] request, error in
            guard let results = request.results as? [VNClassificationObservation],
                  let topResult = results.first else {
                return
            }
            // UI updates must happen on the main queue.
            DispatchQueue.main.async {
                self?.resultLabel.text = "识别结果: \(topResult.identifier) (\(Int(topResult.confidence * 100))%)"
            }
        }
        let handler = VNImageRequestHandler(ciImage: ciImage)
        // Vision inference is CPU-heavy; keep it off the main queue.
        DispatchQueue.global(qos: .userInitiated).async {
            do {
                try handler.perform([request])
            } catch {
                print("识别失败: \(error)")
            }
        }
    }
}
Android 方案(使用 Kotlin + TensorFlow Lite)
/// Camera-capture activity: takes a photo thumbnail and classifies the
/// concentration with a bundled TensorFlow Lite model.
class MainActivity : AppCompatActivity() {

    companion object {
        // Request code for the camera intent (was referenced but never defined).
        private const val REQUEST_IMAGE_CAPTURE = 1
        // NOTE(review): must match the TFLite model's expected input size — confirm.
        private const val INPUT_SIZE = 224
    }

    private lateinit var tflite: Interpreter
    private lateinit var labelList: List<String>

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        // Load the model and its label list; on failure both stay uninitialized.
        try {
            tflite = Interpreter(loadModelFile())
            labelList = loadLabelList()
        } catch (e: Exception) {
            Log.e("MainActivity", "加载模型失败", e)
        }
    }

    fun takePhoto(view: View) {
        val intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
        startActivityForResult(intent, REQUEST_IMAGE_CAPTURE)
    }

    override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
        // Bug fix: the original omitted the super call, which breaks result
        // forwarding to fragments and other components.
        super.onActivityResult(requestCode, resultCode, data)
        if (requestCode == REQUEST_IMAGE_CAPTURE && resultCode == RESULT_OK) {
            // Bug fix: safe cast instead of `as Bitmap`, which crashed when the
            // camera app returned no "data" thumbnail extra.
            val imageBitmap = data?.extras?.get("data") as? Bitmap ?: return
            imageView.setImageBitmap(imageBitmap)
            processImage(imageBitmap)
        }
    }

    /// Scales the bitmap to the model's input size, runs inference, and shows
    /// the best-scoring label with its confidence.
    private fun processImage(bitmap: Bitmap) {
        // Preprocess: resize and pack pixels into the interpreter's input buffer.
        val resizedBitmap = Bitmap.createScaledBitmap(bitmap, INPUT_SIZE, INPUT_SIZE, true)
        val byteBuffer = convertBitmapToByteBuffer(resizedBitmap)

        // Inference output: one row of per-label scores.
        val output = Array(1) { FloatArray(labelList.size) }
        tflite.run(byteBuffer, output)

        val result = output[0]
        val maxIndex = result.indices.maxByOrNull { result[it] } ?: -1
        // Bug fix: the original read result[maxIndex] BEFORE checking
        // maxIndex >= 0, throwing ArrayIndexOutOfBoundsException whenever the
        // label list was empty.
        val label = if (maxIndex >= 0) labelList[maxIndex] else "未知"
        val confidence = if (maxIndex >= 0) result[maxIndex] else 0f

        runOnUiThread {
            resultTextView.text = "识别结果: $label (${(confidence * 100).toInt()}%)"
        }
    }

    /// Memory-maps the bundled .tflite asset for the interpreter.
    private fun loadModelFile(): MappedByteBuffer {
        val fileDescriptor = assets.openFd("concentration_model.tflite")
        val inputStream = FileInputStream(fileDescriptor.fileDescriptor)
        val fileChannel = inputStream.channel
        val startOffset = fileDescriptor.startOffset
        val declaredLength = fileDescriptor.declaredLength
        return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength)
    }

    /// Reads one label per line from assets/labels.txt.
    private fun loadLabelList(): List<String> {
        return assets.open("labels.txt").bufferedReader().useLines { it.toList() }
    }
}
方案二:Web应用 + 服务器方案
开发一个Web应用,通过浏览器调用手机摄像头,然后将图片发送到服务器进行识别:
前端代码(HTML + JavaScript)
<!DOCTYPE html>
<html>
<head>
    <title>浓度识别</title>
    <style>
        #video, #canvas, #result {
            display: block;
            margin: 10px auto;
            max-width: 100%;
        }
        button {
            display: block;
            margin: 10px auto;
            padding: 10px 20px;
            font-size: 16px;
        }
    </style>
</head>
<body>
    <video id="video" autoplay playsinline></video>
    <button id="capture">拍照识别</button>
    <canvas id="canvas"></canvas>
    <div id="result"></div>
    <script>
        const video = document.getElementById('video');
        const canvas = document.getElementById('canvas');
        const context = canvas.getContext('2d');
        const captureButton = document.getElementById('capture');
        const resultDiv = document.getElementById('result');

        // Request camera access and stream it into the <video> element.
        navigator.mediaDevices.getUserMedia({ video: true, audio: false })
            .then(stream => {
                video.srcObject = stream;
            })
            .catch(err => {
                console.error('无法访问摄像头:', err);
                resultDiv.textContent = '无法访问摄像头: ' + err.message;
            });

        // Capture the current frame and send it to the server for recognition.
        captureButton.addEventListener('click', () => {
            // Match the canvas to the live video resolution.
            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            context.drawImage(video, 0, 0, canvas.width, canvas.height);

            // Encode as JPEG and POST as multipart form data.
            canvas.toBlob(blob => {
                const formData = new FormData();
                formData.append('image', blob, 'photo.jpg');
                fetch('/predict', {
                    method: 'POST',
                    body: formData
                })
                    .then(response => {
                        // Bug fix: surface HTTP errors (e.g. the server's 400
                        // for a missing image) instead of parsing them as success.
                        if (!response.ok) {
                            throw new Error('HTTP ' + response.status);
                        }
                        return response.json();
                    })
                    .then(data => {
                        // Bug fix: the backend responds with
                        // {predictions: [{channel, concentration, confidence}, ...]}
                        // (one entry per channel); the original read the
                        // nonexistent data.concentration / data.confidence.
                        resultDiv.textContent = data.predictions
                            .map(p => `通道${p.channel}: ${p.concentration} (${p.confidence}%)`)
                            .join(' | ');
                    })
                    .catch(err => {
                        console.error('识别失败:', err);
                        resultDiv.textContent = '识别失败: ' + err.message;
                    });
            }, 'image/jpeg', 0.8);
        });
    </script>
</body>
</html>
后端代码(Python + Flask)
from flask import Flask, request, jsonify
from PIL import Image
import numpy as np
import joblib
import io

app = Flask(__name__)

# Load the trained scikit-learn classifier once at startup.
model = joblib.load('two_try_rgb_concentration_model.pkl')

# Mapping from the model's class index to the human-readable concentration label.
concentration_mapping = {0: '1n', 1: '500p', 2: '100p', 3: '50p', 4: '0'}

# Fixed channel geometry: circle center (x, y) and radius, in pixels.
# NOTE(review): these coordinates assume a specific capture resolution — confirm
# that phone photos match it, otherwise the sampled regions will be wrong.
FIXED_CHANNELS = {
    1: {'center': (883, 980), 'radius': 100},
    2: {'center': (1523, 977), 'radius': 100},
    3: {'center': (2166, 973), 'radius': 100},
    4: {'center': (906, 1623), 'radius': 100},
    5: {'center': (1529, 1623), 'radius': 100},
    6: {'center': (2152, 1626), 'radius': 100},
    7: {'center': (896, 2265), 'radius': 100},
    8: {'center': (1553, 2245), 'radius': 100},
    9: {'center': (2179, 2257), 'radius': 100},
}


def extract_rgb_from_image(image):
    """Compute the mean R/G/B inside each fixed circular channel region.

    Args:
        image: PIL.Image of the full test card.

    Returns:
        List of dicts with keys '通道编号', 'R', 'G', 'B' (means rounded to 2 dp).
    """
    import cv2

    # Normalize to an RGB uint8 array. (The original converted RGB->BGR->RGB,
    # a no-op round trip; converting the PIL image directly is equivalent and
    # also handles RGBA/grayscale uploads, which would otherwise produce an
    # array shape cv2.cvtColor rejects.)
    rgb = np.array(image.convert('RGB'))

    results = []
    for channel_id, params in FIXED_CHANNELS.items():
        x, y = params['center']
        r = params['radius']
        # Circular mask selecting only this channel's well.
        mask = np.zeros(rgb.shape[:2], dtype=np.uint8)
        cv2.circle(mask, (x, y), r, 255, -1)
        # Masked per-plane means.
        r_mean = cv2.mean(rgb[:, :, 0], mask=mask)[0]
        g_mean = cv2.mean(rgb[:, :, 1], mask=mask)[0]
        b_mean = cv2.mean(rgb[:, :, 2], mask=mask)[0]
        results.append({
            '通道编号': channel_id,
            'R': round(r_mean, 2),
            'G': round(g_mean, 2),
            'B': round(b_mean, 2),
        })
    return results


@app.route('/predict', methods=['POST'])
def predict():
    """Classify each fixed channel of an uploaded photo.

    Expects a multipart upload named 'image'; responds with
    {'predictions': [{'channel', 'concentration', 'confidence'}, ...]}.
    """
    if 'image' not in request.files:
        return jsonify({'error': '没有上传图片'}), 400

    image_file = request.files['image']
    try:
        image = Image.open(io.BytesIO(image_file.read()))
    except Exception:
        # Bug fix: a corrupt or non-image upload previously escaped as a 500.
        return jsonify({'error': '无法读取图片'}), 400

    rgb_data = extract_rgb_from_image(image)

    predictions = []
    for channel_data in rgb_data:
        features = np.array([[channel_data['R'], channel_data['G'], channel_data['B']]])
        # Bug fix: cast numpy scalars to builtins — jsonify cannot serialize
        # np.int64/np.float64, so the response failed under modern Flask.
        prediction = int(model.predict(features)[0])
        confidence = float(np.max(model.predict_proba(features))) * 100
        predictions.append({
            'channel': channel_data['通道编号'],
            # .get avoids a KeyError if the model ever emits an unmapped class.
            'concentration': concentration_mapping.get(prediction, '未知'),
            'confidence': round(confidence, 2),
        })
    return jsonify({'predictions': predictions})


if __name__ == '__main__':
    # debug=True is for development only; disable it before deployment.
    app.run(host='0.0.0.0', port=5000, debug=True)
方案三:使用现成的移动端机器学习框架
1. 使用 Google ML Kit(Android/iOS)
ML Kit 提供了易于使用的API,可以在移动设备上运行自定义TensorFlow Lite模型。
2. 使用 Fritz AI
Fritz AI 提供了端到端的移动机器学习平台,支持模型转换、部署和优化。
3. 使用 Apple 的 Create ML
如果您主要针对iOS设备,可以使用Create ML来训练和部署模型。
实施建议
模型转换:将您的scikit-learn模型转换为适合移动端的格式(如TensorFlow Lite或Core ML)
模型优化:对模型进行量化、剪枝等优化,以减少模型大小和提高推理速度
用户体验:设计直观的用户界面,让用户可以轻松拍照并查看识别结果
性能考虑:考虑在不同设备上的性能差异,可能需要提供不同精度的模型版本
部署注意事项
隐私保护:如果使用服务器方案,确保用户图片数据得到妥善保护
网络要求:服务器方案需要稳定的网络连接
设备兼容性:确保应用在各种设备和操作系统版本上都能正常工作
模型更新:设计机制以便在不更新整个应用的情况下更新模型
选择哪种方案取决于您的技术栈、目标用户和设备要求。如果您主要针对特定平台(如iOS),原生应用可能是最佳选择。如果您希望跨平台使用,Web应用可能是更好的选择。