ARKit, Core ML, FileProvider, IdentityLookup, Core NFC, Vision, and so on. The official Vision documentation lists the following capabilities:
Face Detection and Recognition: face detection
Image Alignment Analysis: image registration and comparison
Barcode Detection: QR code / barcode detection
Text Detection: text detection
Object Detection and Tracking: object detection and tracking
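For reference, each of these capabilities maps to one or more concrete VNRequest subclasses; a quick, non-exhaustive sketch of the ones used later in this article:

import Vision

// Face Detection and Recognition
let faceRectsRequest = VNDetectFaceRectanglesRequest()
let faceLandmarksRequest = VNDetectFaceLandmarksRequest()
// Barcode Detection
let barcodeRequest = VNDetectBarcodesRequest()
// Text Detection
let textRequest = VNDetectTextRectanglesRequest()
// Rectangle detection
let rectangleRequest = VNDetectRectanglesRequest()
// Object Detection and Tracking: VNTrackObjectRequest (created from an initial VNDetectedObjectObservation)
// Image Alignment Analysis: VNTranslationalImageRegistrationRequest / VNHomographicImageRegistrationRequest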
The image inputs Vision accepts are CVPixelBufferRef, CGImageRef, CIImage, NSURL and NSData, that is, CVPixelBuffer, CGImage, CIImage, URL and Data in Swift. The details can be found in the VNImageRequestHandler.h header of Vision.framework.
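As a sketch, the corresponding VNImageRequestHandler initializers look like this (pixelBuffer, cgImage, ciImage, url and data are assumed to already exist):

let handler1 = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
let handler2 = VNImageRequestHandler(cgImage: cgImage, options: [:])
let handler3 = VNImageRequestHandler(ciImage: ciImage, options: [:])
let handler4 = VNImageRequestHandler(url: url, options: [:])
let handler5 = VNImageRequestHandler(data: data, options: [:])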
When using Vision, we first need to be clear about the effect we want, then choose the appropriate Request class for that effect and hand it to a RequestHandler. The Handler holds the image to be analyzed and dispatches the results to each Request. In the Request's completion block, the results property yields an array of Observation objects; what the observations array contains depends on the type of request. An Observation has properties such as boundingBox and landmarks, which store the coordinates and feature points of the detected object.
RequestHandler (request-handling objects)
VNImageRequestHandler: handles one or more image analysis requests for a single image (CVPixelBuffer, CGImage, CIImage, URL, Data)
VNSequenceRequestHandler: handles image analysis requests for a sequence of multiple images
VNRequest: the abstract class for image analysis requests, inherits from NSObject
VNImageBasedRequest: an analysis request that focuses on a specific part of an image
VNObservation (detection results)
VNObservation: the abstract class for image analysis results, inherits from NSObject
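To make the Request → RequestHandler → Observation flow concrete, here is a minimal sketch using face rectangle detection (detectFaces and uiImage are illustrative names):

import UIKit
import Vision

func detectFaces(in uiImage: UIImage) {
    guard let ciImage = CIImage(image: uiImage) else { return }
    // 1. The request says what to analyze and receives the results in its completion block
    let request = VNDetectFaceRectanglesRequest { request, error in
        guard let observations = request.results as? [VNFaceObservation] else { return }
        // boundingBox is normalized to 0...1 relative to the image
        observations.forEach { print("face at \($0.boundingBox)") }
    }
    // 2. The handler holds the image and performs the request(s) on it
    let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])
    DispatchQueue.global().async {
        try? handler.perform([request])
    }
}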
When creating a VNImageRequestHandler object, a CIImage is one of the accepted inputs:
//1. Convert to CIImage
guard let ciImage = CIImage(image: image) else { return }
//2. Create the request handler
let requestHandle = VNImageRequestHandler(ciImage: ciImage, options: [:])
VNRequest is the parent class of all requests. Its completion callback has the following signature:
public typealias VNRequestCompletionHandler = (VNRequest, Error?) -> Swift.Void
//4. Set up the completion callback
let completionHandle: VNRequestCompletionHandler = { request, error in
    let observations = request.results
    // The array of recognized objects
}
A request can be created with either of two initializers:
// No parameters
public convenience init()
// With a completion closure
public init(completionHandler: Vision.VNRequestCompletionHandler? = nil)
//5. Create the text-detection request
let baseRequest = VNDetectTextRectanglesRequest(completionHandler: completionHandle)
// Also report the boxes of individual characters
baseRequest.setValue(true, forKey: "reportCharacterBoxes")
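When the request is declared as a VNDetectTextRectanglesRequest rather than a plain VNImageBasedRequest, the same flag is also available as a typed property, so KVC is not strictly required; a small sketch:

let textRequest = VNDetectTextRectanglesRequest(completionHandler: completionHandle)
// Equivalent to setValue(true, forKey: "reportCharacterBoxes")
textRequest.reportCharacterBoxes = true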
Requests are sent by calling the handler's perform method:
open func perform(_ requests: [VNRequest]) throws
//6. Send the request
DispatchQueue.global().async {
    do {
        try requestHandle.perform([baseRequest])
    } catch {
        print("Throws:\(error)")
    }
}
The Observation objects: results is of type [Any]?. The boundingBox property gives the rect of the corresponding text region; boundingBox is normalized relative to the image, so all of its values are less than 1.
//1. Get the recognized VNTextObservation objects
guard let boxArr = observations as? [VNTextObservation] else { return }
//2. Create the rect arrays
var bigRects = [CGRect](), smallRects = [CGRect]()
//3. Iterate over the results
for boxObj in boxArr {
    // 3.1 Convert the size
    // Get the region of a whole line of text
    bigRects.append(convertRect(boxObj.boundingBox, image))
    // 3.2 Get the individual character boxes
    guard let rectangleArr = boxObj.characterBoxes else { continue }
    for rectangle in rectangleArr {
        // 3.3 Get the rect of every single character
        let boundBox = rectangle.boundingBox
        smallRects.append(convertRect(boundBox, image))
    }
}
Coordinate conversion:
/// Convert an image-relative rect
fileprivate func convertRect(_ rectangleRect: CGRect, _ image: UIImage) -> CGRect {
    // Convert the image's actual size to the size it is displayed at in the imageView
    let imageSize = image.scaleImage()
    let w = rectangleRect.width * imageSize.width
    let h = rectangleRect.height * imageSize.height
    let x = rectangleRect.minX * imageSize.width
    // Vision's Y axis is flipped compared to UIView's Y axis
    let y = (1 - rectangleRect.minY) * imageSize.height - h
    return CGRect(x: x, y: y, width: w, height: h)
}
//1. Convert to CIImage
guard let ciImage = CIImage(image: image) else { return }
//2. Create the request handler
let requestHandle = VNImageRequestHandler(ciImage: ciImage, options: [:])
//3. Create the baseRequest
// Most recognition requests inherit from VNImageBasedRequest
var baseRequest = VNImageBasedRequest()
//4. Set up the completion callback
let completionHandle: VNRequestCompletionHandler = { request, error in
    let observations = request.results
    self.handleImageObservable(type: type, image: image, observations, completeBack)
}
//5. Create the concrete recognition request
switch type {
case .rectangle:
    baseRequest = VNDetectRectanglesRequest(completionHandler: completionHandle)
case .staticFace:
    baseRequest = VNDetectFaceRectanglesRequest(completionHandler: completionHandle)
default:
    break
}
/// Rectangle detection
fileprivate func rectangleDectect(_ observations: [Any]?, image: UIImage, _ complecHandle: JunDetectHandle) {
    //1. Get the recognized VNRectangleObservation objects
    guard let boxArr = observations as? [VNRectangleObservation] else { return }
    //2. Create the rect array
    var bigRects = [CGRect]()
    //3. Iterate over the results
    for boxObj in boxArr {
        // 3.1 Convert the coordinates
        bigRects.append(convertRect(boxObj.boundingBox, image))
    }
    //4. Call back with the results
    complecHandle(bigRects, [])
}
For face detection, the observations are cast to VNFaceObservation:
guard let boxArr = observations as? [VNFaceObservation] else { return }
VNDetectBarcodesRequest has two relevant properties:
// The supported, recognizable barcode symbologies (called directly on the class)
open class var supportedSymbologies: [VNBarcodeSymbology] { get }
// The symbologies this request should recognize
open var symbologies: [VNBarcodeSymbology]
How to use the supportedSymbologies class property:
let request = VNDetectBarcodesRequest(completionHandler: completionHandle)
request.symbologies = VNDetectBarcodesRequest.supportedSymbologies
The observations are cast to [VNBarcodeObservation]. VNBarcodeObservation has three properties:
// The barcode symbology: qr, code128, and so on
open var symbology: VNBarcodeSymbology { get }
// The descriptor carrying the barcode's raw information
open var barcodeDescriptor: CIBarcodeDescriptor? { get }
// For a QR code, the encoded string (for example a URL)
open var payloadStringValue: String? { get }
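A minimal sketch of reading these properties inside the completion handler (the handler name is illustrative):

let completionHandle: VNRequestCompletionHandler = { request, error in
    guard let observations = request.results as? [VNBarcodeObservation] else { return }
    for barcode in observations {
        // Symbology, e.g. .qr or .code128
        print("symbology:", barcode.symbology.rawValue)
        // For QR codes, usually the encoded string / URL
        print("payload:", barcode.payloadStringValue ?? "")
    }
}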
In this example, the payloadStringValue is the author's Jianshu URL. The CIBarcodeDescriptor object can be decoded further:
/// Handle the QR code information
fileprivate func qrCodeHandle(barCode: CIBarcodeDescriptor?) {
    //1. Cast to the concrete barcode descriptor
    guard let code = barCode as? CIQRCodeDescriptor else { return }
    //2. Read the barcode information
    let level = code.errorCorrectionLevel.hashValue
    let version = code.symbolVersion
    let mask = code.maskPattern
    let data = code.errorCorrectedPayload
    let dataStr = String(data: data, encoding: .utf8)
    print("QR code info --", level, "---", version, "----", mask, "---", dataStr ?? "")
}
An overview of VNFaceLandmarks2D:
/// Face contour
var faceContour: VNFaceLandmarkRegion2D?
/// Left eye, right eye
var leftEye: VNFaceLandmarkRegion2D?
var rightEye: VNFaceLandmarkRegion2D?
/// Left eyebrow, right eyebrow
var leftEyebrow: VNFaceLandmarkRegion2D?
var rightEyebrow: VNFaceLandmarkRegion2D?
/// Left pupil, right pupil
var leftPupil: VNFaceLandmarkRegion2D?
var rightPupil: VNFaceLandmarkRegion2D?
/// Nose, nose crest, median line
var nose: VNFaceLandmarkRegion2D?
var noseCrest: VNFaceLandmarkRegion2D?
var medianLine: VNFaceLandmarkRegion2D?
/// Outer lips, inner lips
var outerLips: VNFaceLandmarkRegion2D?
var innerLips: VNFaceLandmarkRegion2D?
Each VNFaceLandmarkRegion2D exposes its points:
// All the normalized points of the region
@nonobjc public var normalizedPoints: [CGPoint] { get }
// The number of points in the region
open var pointCount: Int { get }
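Before they can be drawn, the normalizedPoints have to be mapped into image (or view) coordinates: they are normalized to the face's boundingBox, which is itself normalized to the image, and the Y axis is flipped compared to UIKit. A minimal sketch of that conversion (function and parameter names are illustrative):

/// Map a landmark region's normalized points into UIKit image coordinates
func landmarkPoints(_ region: VNFaceLandmarkRegion2D, faceRect: CGRect, imageSize: CGSize) -> [CGPoint] {
    return region.normalizedPoints.map { point in
        // x/y inside the face rect -> x/y inside the image, then flip Y for UIKit
        let x = (faceRect.minX + point.x * faceRect.width) * imageSize.width
        let y = (1 - (faceRect.minY + point.y * faceRect.height)) * imageSize.height
        return CGPoint(x: x, y: y)
    }
}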
The points are then drawn in the view's func draw(_ rect: CGRect) method:
//5.1 Get the current context
let content = UIGraphicsGetCurrentContext()
//5.2 Set the fill color (use setStroke for the stroke color)
UIColor.green.set()
//5.3 Set the line width
content?.setLineWidth(2)
//5.4 Set the line join and cap style
content?.setLineJoin(.round)
content?.setLineCap(.round)
//5.5 Enable anti-aliasing
content?.setShouldAntialias(true)
content?.setAllowsAntialiasing(true)
//5.6 Start drawing
content?.addLines(between: pointArr)
content?.drawPath(using: .stroke)
//5.7 Finish drawing
content?.strokePath()
Since it is hard to record a GIF on a real device (I tried, the result was not great, so I gave up), download the source code and run it on a device if you want to see the effect.
Here is an image you can scan.
The initialization of the request has been covered above, so let's look at how the handler is initialized.
CVPixelBuffer: the object output in real time by the camera scan.
//1. Create the request handler
let faceHandle = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
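The pixelBuffer here typically comes from the camera's video data output. A minimal sketch of the capture delegate, assuming the class conforms to AVCaptureVideoDataOutputSampleBufferDelegate and baseRequest was created as shown earlier:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Extract the pixel buffer of the current frame
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    // Build a handler for this single frame and run the request(s) on it
    let faceHandle = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
    try? faceHandle.perform([baseRequest])
}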
DispatchQueue.global().async {
    do {
        try faceHandle.perform([baseRequest])
    } catch {
        print("Throws:\(error)")
    }
}
/// Get the converted rect for the eye area
fileprivate func getEyePoint(faceModel: FaceFeatureModel, position: AVCaptureDevice.Position) -> CGRect {
    //1. Get the left and right eye
    guard let leftEye = faceModel.leftEye else { return CGRect.zero }
    guard let rightEye = faceModel.rightEye else { return CGRect.zero }
    //2. Convert their point arrays
    let leftPoint = conventPoint(landmark: leftEye, faceRect: faceModel.faceObservation.boundingBox, position: position)
    let rightPoint = conventPoint(landmark: rightEye, faceRect: faceModel.faceObservation.boundingBox, position: position)
    //3. Sort the coordinates
    let pointXs = (leftPoint.0 + rightPoint.0).sorted()
    let pointYs = (leftPoint.1 + rightPoint.1).sorted()
    //4. Compute the frame for the glasses image
    let image = UIImage(named: "eyes")!
    let imageWidth = (pointXs.last ?? 0.0) - (pointXs.first ?? 0) + 40
    let imageHeight = image.size.height / image.size.width * imageWidth
    return CGRect(x: (pointXs.first ?? 0) - 20, y: (pointYs.first ?? 0) - 5, width: imageWidth, height: imageHeight)
}
/// Coordinate conversion
fileprivate func conventPoint(landmark: VNFaceLandmarkRegion2D, faceRect: CGRect, position: AVCaptureDevice.Position) -> ([CGFloat], [CGFloat]) {
    //1. Prepare the output arrays
    var XArray = [CGFloat](), YArray = [CGFloat]()
    let viewRect = previewLayer.frame
    //2. Iterate over the landmark points
    for i in 0..<landmark.pointCount {
        //2.1 Get the current point and convert it into the preview layer's coordinates
        let point = landmark.normalizedPoints[i]
        let rectWidth = viewRect.width * faceRect.width
        let rectHeight = viewRect.height * faceRect.height
        let rectY = viewRect.height - (point.y * rectHeight + faceRect.minY * viewRect.height)
        var rectX = point.x * rectWidth + faceRect.minX * viewRect.width
        // Adjust the X coordinate when using the front camera
        if position == .front {
            rectX = viewRect.width + (point.x - 1) * rectWidth
        }
        XArray.append(rectX)
        YArray.append(rectY)
    }
    return (XArray, YArray)
}
With the CGRect returned above, simply add the glasses image at that position; a minimal sketch follows.
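A sketch of that overlay, assuming eyesImageView is a UIImageView already added above the preview layer (the names are illustrative):

// Place the glasses image over the detected eye region
let eyeRect = getEyePoint(faceModel: faceModel, position: .front)
eyesImageView.image = UIImage(named: "eyes")
eyesImageView.frame = eyeRect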
For object tracking, keep a reference to the most recently detected VNDetectedObjectObservation:
fileprivate var lastObservation: VNDetectedObjectObservation?
// The handler that processes requests across a sequence of multiple images
let sequenceHandle = VNSequenceRequestHandler()
//4. Create the object-tracking request (lastObservation must already hold a detected observation)
guard let lastObservation = lastObservation else { return }
let trackRequest = VNTrackObjectRequest(detectedObjectObservation: lastObservation, completionHandler: completionHandle)
// Set the tracking accuracy to high
trackRequest.trackingLevel = .accurate
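The tracking request is then run against every new camera frame through the sequence handler; a sketch, with pixelBuffer being the current frame:

do {
    // The sequence handler keeps state across frames, which is what makes tracking work
    try sequenceHandle.perform([trackRequest], on: pixelBuffer)
} catch {
    print("Throws:\(error)")
}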
//2. Convert the coordinates of the tapped area
let convertRect = visionTool.convertRect(viewRect: redView.frame, layerRect: previewLayer.frame)
//3. Create a new observation from the tapped position
let newObservation = VNDetectedObjectObservation(boundingBox: convertRect)
lastObservation = newObservation
In the completion handler, the VNDetectedObjectObservation is reassigned:
//1. Get the first actual result
guard let newObservation = observations?.first as? VNDetectedObjectObservation else { return }
//2. Store it as the latest observation
self.lastObservation = newObservation
//4. Convert the coordinates and move the red tracking view
let newRect = newObservation.boundingBox
let convertRect = visionTool.convertRect(newRect, self.previewLayer.frame)
self.redView.frame = convertRect
That covers the usage of iOS 11's new Vision framework in Swift.