ml-vision
The Firebase ML Kit service interface.
This module is available for the default app only.
Example
Get the ML Kit service for the default app:
const defaultAppMLKit = firebase.vision();
Properties
Methods
barcodeDetectorProcessImage
Returns an array of barcodes (as VisionBarcode) detected in a local image file.
barcodeDetectorProcessImage(imageFilePath: string, barcodeDetectorOptions?: VisionBarcodeDetectorOptions): Promise<VisionBarcode[]>;
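A minimal usage sketch; localFilePath is a placeholder, and the displayValue field is assumed from the typical VisionBarcode shape:
const localFilePath = '/path/to/image.png'; // placeholder path
const barcodes = await firebase.vision().barcodeDetectorProcessImage(localFilePath);
barcodes.forEach(barcode => console.log(barcode.displayValue)); // field name assumed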
cloudDocumentTextRecognizerProcessImage
Detects text within a document in a local image file, using the cloud (Firebase) model.
cloudDocumentTextRecognizerProcessImage(imageFilePath: string, cloudDocumentTextRecognizerOptions?: VisionCloudDocumentTextRecognizerOptions): Promise<VisionDocumentText>;
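A minimal sketch, reusing the localFilePath placeholder above; the text field is assumed from the VisionDocumentText shape:
const documentText = await firebase.vision().cloudDocumentTextRecognizerProcessImage(localFilePath);
console.log('Document text: ', documentText.text); // field name assumed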
cloudImageLabelerProcessImage
Returns an array of labels (as VisionImageLabel) for a given local image file path. Label detection is performed in the cloud (Firebase), which is slower but produces more descriptive results.
cloudImageLabelerProcessImage(imageFilePath: string, cloudImageLabelerOptions?: VisionCloudImageLabelerOptions): Promise<VisionImageLabel[]>;
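A minimal sketch; the confidenceThreshold option and the text/confidence fields are assumptions, not confirmed by this reference:
const cloudLabels = await firebase.vision().cloudImageLabelerProcessImage(localFilePath, {
  confidenceThreshold: 0.8, // assumed option name
});
cloudLabels.forEach(label => console.log(label.text, label.confidence)); // field names assumed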
cloudLandmarkRecognizerProcessImage
Returns an array of landmarks (as VisionLandmark) for a given local image file path. Landmark detection is performed in the cloud (Firebase).
cloudLandmarkRecognizerProcessImage(imageFilePath: string, cloudLandmarkRecognizerOptions?: VisionCloudLandmarkRecognizerOptions): Promise<VisionLandmark[]>;
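A minimal sketch; the landmark and confidence fields are assumed from the VisionLandmark shape:
const landmarks = await firebase.vision().cloudLandmarkRecognizerProcessImage(localFilePath);
landmarks.forEach(result => console.log(result.landmark, result.confidence)); // field names assumed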
cloudTextRecognizerProcessImage
Detects text in a local image file using the cloud (Firebase) model.
cloudTextRecognizerProcessImage(imageFilePath: string, cloudTextRecognizerOptions?: VisionCloudTextRecognizerOptions): Promise<VisionText>;
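A minimal sketch; the text field is assumed from the VisionText shape:
const cloudText = await firebase.vision().cloudTextRecognizerProcessImage(localFilePath);
console.log('Recognized text: ', cloudText.text); // field name assumed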
faceDetectorProcessImage
Detects faces in a local image file.
faceDetectorProcessImage(imageFilePath: string, faceDetectorOptions?: VisionFaceDetectorOptions): Promise<VisionFace[]>;
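A minimal sketch that only counts detected faces, so no VisionFace field names are assumed:
const faces = await firebase.vision().faceDetectorProcessImage(localFilePath);
console.log(`Detected ${faces.length} face(s)`);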
imageLabelerProcessImage
Returns an array of labels (as VisionImageLabel) for a given local image file path. Label detection is performed on device, which is faster but produces less descriptive results.
imageLabelerProcessImage(imageFilePath: string, imageLabelerOptions?: VisionImageLabelerOptions): Promise<VisionImageLabel[]>;
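A minimal sketch; the text and confidence fields are assumed from the VisionImageLabel shape:
const deviceLabels = await firebase.vision().imageLabelerProcessImage(localFilePath);
deviceLabels.forEach(label => console.log(label.text, label.confidence)); // field names assumed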
textRecognizerProcessImage
Detects text in a local image file using the on-device model.
textRecognizerProcessImage(imageFilePath: string): Promise<VisionText>;
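A minimal sketch; the text field is assumed from the VisionText shape:
const text = await firebase.vision().textRecognizerProcessImage(localFilePath);
console.log('Recognized text: ', text.text); // field name assumed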
Statics
- VisionBarcodeAddressType
- VisionBarcodeEmailType
- VisionBarcodeFormat
- VisionBarcodePhoneType
- VisionBarcodeValueType
- VisionBarcodeWifiEncryptionType
- VisionCloudLandmarkRecognizerModelType
- VisionCloudTextRecognizerModelType
- VisionDocumentTextRecognizedBreakType
- VisionFaceContourType
- VisionFaceDetectorClassificationMode
- VisionFaceDetectorContourMode
- VisionFaceDetectorLandmarkMode
- VisionFaceDetectorPerformanceMode
- VisionFaceLandmarkType
VisionBarcodeAddressType
ml-vision.VisionBarcodeAddressType;
VisionBarcodeEmailType
ml-vision.VisionBarcodeEmailType;
VisionBarcodeFormat
ml-vision.VisionBarcodeFormat;
VisionBarcodePhoneType
ml-vision.VisionBarcodePhoneType;
VisionBarcodeValueType
ml-vision.VisionBarcodeValueType;
VisionBarcodeWifiEncryptionType
ml-vision.VisionBarcodeWifiEncryptionType;
VisionCloudLandmarkRecognizerModelType
ml-vision.VisionCloudLandmarkRecognizerModelType;
VisionCloudTextRecognizerModelType
ml-vision.VisionCloudTextRecognizerModelType;
VisionDocumentTextRecognizedBreakType
ml-vision.VisionDocumentTextRecognizedBreakType;
VisionFaceContourType
ml-vision.VisionFaceContourType;
VisionFaceDetectorClassificationMode
ml-vision.VisionFaceDetectorClassificationMode;
VisionFaceDetectorContourMode
ml-vision.VisionFaceDetectorContourMode;
VisionFaceDetectorLandmarkMode
ml-vision.VisionFaceDetectorLandmarkMode;
VisionFaceDetectorPerformanceMode
ml-vision.VisionFaceDetectorPerformanceMode;
VisionFaceLandmarkType
ml-vision.VisionFaceLandmarkType;