Detection

The basic face detection can be performed in two ways:

  • By parameters that you select and add to the request manually. In this case, the configuration is not saved and cannot be automatically reused.
  • By a processing scenario that already includes certain parameters. You can use one of the predefined scenarios or add a custom one. For detailed information, see the Scenarios page.

The request and response parameters are described on the Detection page in the Web Service section.

detectFaces

The detectFaces method performs the face detection operation. It takes the DetectFacesRequest object; see its structure.

To perform face detection, invoke:

// Swift
FaceSDK.service.detectFaces(by: detectFacesRequest) { detectFacesResponse in
  // handle response
}

// Objective-C
[RFSFaceSDK.service detectFacesByRequest:detectFacesRequest completion:^(RFSDetectFacesResponse *detectFacesResponse) {
  // handle response
}];

// Kotlin
FaceSDK.Instance().detectFaces(detectFacesRequest) { detectFacesResponse: DetectFacesResponse ->
    // handle response
}

// Java
FaceSDK.Instance().detectFaces(detectFacesRequest, new DetectFacesCompletion() {
    @Override
    public void onDetectFacesCompleted(@NonNull DetectFacesResponse detectFacesResponse) {
        // handle response
    }
});

// React Native
FaceSDK.detectFaces(JSON.stringify(detectFacesRequest), raw => {
  var detectFacesResponse = DetectFacesResponse.fromJson(JSON.parse(raw))
  // handle response
}, e => { })

// Flutter
FaceSDK.detectFaces(jsonEncode(detectFacesRequest)).then((raw) {
  var detectFacesResponse = DetectFacesResponse.fromJson(jsonDecode(raw));
  // handle response
});

// Cordova
FaceSDK.detectFaces(JSON.stringify(detectFacesRequest), raw => {
  detectFacesResponse = DetectFacesResponse.fromJson(JSON.parse(raw))
  // handle response
}, e => { })

// Ionic
FaceSDK.detectFaces(JSON.stringify(detectFacesRequest)).then(raw => {
  var detectFacesResponse = DetectFacesResponse.fromJson(JSON.parse(raw))
  // handle response
})

// Xamarin (Android)
FaceSDK.Instance().DetectFaces(detectFacesRequest, this);

public void OnDetectFacesCompleted(DetectFacesResponse response)
{
    // handle response
}

// Xamarin (iOS)
RFSFaceSDK.Service.DetectFacesByRequest(detectFacesRequest, (RFSDetectFacesResponse response) =>
{
    // handle response
});

Request

By the set parameters

// Swift
let detectFacesRequest = DetectFacesRequest(image: image, configuration: configuration)

// Objective-C
RFSDetectFacesRequest *detectFacesRequest = [[RFSDetectFacesRequest alloc] initWithImage:image
                                                                           configuration:configuration];

// Kotlin
val detectFacesRequest = DetectFacesRequest(bitmap, configuration)

// Java
DetectFacesRequest detectFacesRequest = new DetectFacesRequest(bitmap, configuration);

// React Native
let detectFacesRequest = new DetectFacesRequest()
detectFacesRequest.image = bitmap
detectFacesRequest.configuration = configuration

// Flutter
var detectFacesRequest = new DetectFacesRequest();
detectFacesRequest.image = bitmap;
detectFacesRequest.configuration = configuration;

// Cordova
detectFacesRequest = new DetectFacesRequest()
detectFacesRequest.image = bitmap
detectFacesRequest.configuration = configuration

// Ionic
let detectFacesRequest = new DetectFacesRequest()
detectFacesRequest.image = bitmap
detectFacesRequest.configuration = configuration

// Xamarin (Android)
var detectFacesRequest = new DetectFacesRequest(bitmap, configuration);

// Xamarin (iOS)
var detectFacesRequest = new RFSDetectFacesRequest(uiImage, configuration);

By the scenario

// Swift
let detectFacesRequest = DetectFacesRequest.qualityICAORequest(for: image)

// Objective-C
RFSDetectFacesRequest *detectFacesRequest = [RFSDetectFacesRequest qualityICAORequestForImage:image];

// Kotlin
val detectFacesRequest = DetectFacesRequest.qualityICAORequestForImage(bitmap)

// Java
DetectFacesRequest detectFacesRequest = DetectFacesRequest.qualityICAORequestForImage(bitmap);

// React Native
let detectFacesRequest = new DetectFacesRequest()
detectFacesRequest.scenario = DetectFacesScenario.QUALITY_ICAO
detectFacesRequest.image = bitmap

// Flutter
var detectFacesRequest = new DetectFacesRequest();
detectFacesRequest.scenario = DetectFacesScenario.QUALITY_ICAO;
detectFacesRequest.image = bitmap;

// Cordova
detectFacesRequest = new DetectFacesRequest()
detectFacesRequest.scenario = Enum.DetectFacesScenario.QUALITY_ICAO
detectFacesRequest.image = bitmap

// Ionic
let detectFacesRequest = new DetectFacesRequest()
detectFacesRequest.scenario = DetectFacesScenario.QUALITY_ICAO
detectFacesRequest.image = bitmap

// Xamarin (Android)
var detectFacesRequest = DetectFacesRequest.QualityICAORequestForImage(bitmap);

// Xamarin (iOS)
var detectFacesRequest = RFSDetectFacesRequest.QualityICAORequestForImage(uiImage);

Creating configuration

The SDK uses the following configuration parameters to detect faces: onlyCentralFace and outputImageParams. You can also check the face image quality and evaluate face attributes.

To create a configuration object, invoke:

// Swift
let configuration = DetectFacesConfiguration()

// Objective-C
RFSDetectFacesConfiguration *configuration = [[RFSDetectFacesConfiguration alloc] init];

// Kotlin
val configuration = DetectFacesConfiguration()

// Java
DetectFacesConfiguration configuration = new DetectFacesConfiguration();

// React Native
let configuration = new DetectFacesConfiguration()

// Flutter
var configuration = new DetectFacesConfiguration();

// Cordova
configuration = new DetectFacesConfiguration()

// Ionic
let configuration = new DetectFacesConfiguration()

// Xamarin (Android)
var configuration = new DetectFacesConfiguration();

// Xamarin (iOS)
var configuration = new RFSDetectFacesConfiguration();

OnlyCentralFace

This parameter defines whether to process only the central face in the image or all faces. If set to true, the SDK detects and processes only the most central face in the image. If set to false, the SDK processes all the faces in the image.

// Swift
configuration.onlyCentralFace = true

// Objective-C
configuration.onlyCentralFace = YES;

// Kotlin
configuration.onlyCentralFace = true

// Java
configuration.setOnlyCentralFace(true);

// React Native
configuration.onlyCentralFace = true

// Flutter
configuration.onlyCentralFace = true;

// Cordova
configuration.onlyCentralFace = true

// Ionic
configuration.onlyCentralFace = true

// Xamarin (Android)
configuration.OnlyCentralFace = (Java.Lang.Boolean)true;

// Xamarin (iOS)
configuration.OnlyCentralFace = true;
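
The value you choose determines how you read the results: with onlyCentralFace enabled, you would typically read the response's detection field, while allDetections lists every detected face. A minimal Swift sketch, using only the response fields described in the Response section below:

// onlyCentralFace = true: read the single result from `detection`.
if let detection = detectFacesResponse.detection {
    print("Central face rect: \(detection.faceRect)")
}

// onlyCentralFace = false: iterate over all detected faces.
for detectFaceResult in detectFacesResponse.allDetections ?? [] {
    print("Face rect: \(detectFaceResult.faceRect)")
}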

OutputImageParams

If outputImageParams is set, the uploaded image is processed according to the specified settings, such as the crop parameters and background color shown below.

// Swift
let outputImageCrop = OutputImageCrop(type: .ratio2x3,
                                      size: CGSize(width: 500, height: 750),
                                      padColor: .white,
                                      returnOriginalRect: true)

let outputImageParams = OutputImageParams()
outputImageParams.crop = outputImageCrop
outputImageParams.backgroundColor = .white

configuration.outputImageParams = outputImageParams

// Objective-C
RFSOutputImageCrop *outputImageCrop = [[RFSOutputImageCrop alloc] initWithType:RFSOutputImageCropAspectRatio2x3
                                                                          size:CGSizeMake(500, 750)
                                                                      padColor:[UIColor whiteColor]
                                                            returnOriginalRect:YES];

RFSOutputImageParams *outputImageParams = [[RFSOutputImageParams alloc] init];
outputImageParams.crop = outputImageCrop;
outputImageParams.backgroundColor = [UIColor whiteColor];

configuration.outputImageParams = outputImageParams;

// Kotlin
val outputImageCrop = OutputImageCrop(
    OutputImageCropAspectRatio.OUTPUT_IMAGE_CROP_ASPECT_RATIO_2X3,
    Size(500, 750),
    Color.WHITE,
    true
)

val outputImageParams = OutputImageParams(outputImageCrop, Color.WHITE)
configuration.outputImageParams = outputImageParams

// Java
OutputImageCrop outputImageCrop = new OutputImageCrop(
    OutputImageCropAspectRatio.OUTPUT_IMAGE_CROP_ASPECT_RATIO_2X3,
    new Size(500, 750),
    Color.WHITE,
    true);

OutputImageParams outputImageParams = new OutputImageParams(outputImageCrop, Color.WHITE);
configuration.setOutputImageParams(outputImageParams);

// React Native
let outputImageCrop = new OutputImageCrop()
outputImageCrop.type = OutputImageCropAspectRatio.OUTPUT_IMAGE_CROP_ASPECT_RATIO_2X3
let size = new Size()
size.width = 500
size.height = 750
outputImageCrop.size = size
outputImageCrop.padColor = "#FFFFFF"
outputImageCrop.returnOriginalRect = true

let outputImageParams = new OutputImageParams()
outputImageParams.crop = outputImageCrop
outputImageParams.backgroundColor = "#FFFFFF"
configuration.outputImageParams = outputImageParams

// Flutter
var outputImageCrop = new OutputImageCrop();
outputImageCrop.type = OutputImageCropAspectRatio.OUTPUT_IMAGE_CROP_ASPECT_RATIO_2X3;
var size = new Size();
size.width = 500;
size.height = 750;
outputImageCrop.size = size;
outputImageCrop.padColor = "#FFFFFF";
outputImageCrop.returnOriginalRect = true;

var outputImageParams = new OutputImageParams();
outputImageParams.crop = outputImageCrop;
outputImageParams.backgroundColor = "#FFFFFF";
configuration.outputImageParams = outputImageParams;

// Cordova
outputImageCrop = new OutputImageCrop()
outputImageCrop.type = Enum.OutputImageCropAspectRatio.OUTPUT_IMAGE_CROP_ASPECT_RATIO_2X3
size = new Size()
size.width = 500
size.height = 750
outputImageCrop.size = size
outputImageCrop.padColor = "#FFFFFF"
outputImageCrop.returnOriginalRect = true

outputImageParams = new OutputImageParams()
outputImageParams.crop = outputImageCrop
outputImageParams.backgroundColor = "#FFFFFF"
configuration.outputImageParams = outputImageParams

// Ionic
let outputImageCrop = new OutputImageCrop()
outputImageCrop.type = OutputImageCropAspectRatio.OUTPUT_IMAGE_CROP_ASPECT_RATIO_2X3
let size = new Size()
size.width = 500
size.height = 750
outputImageCrop.size = size
outputImageCrop.padColor = "#FFFFFF"
outputImageCrop.returnOriginalRect = true

let outputImageParams = new OutputImageParams()
outputImageParams.crop = outputImageCrop
outputImageParams.backgroundColor = "#FFFFFF"
configuration.outputImageParams = outputImageParams

// Xamarin (Android)
using Size = Android.Util.Size; // add this line at the top of the file
var configuration = new DetectFacesConfiguration();
var size = new Size(500, 750);
var outputImageCrop = new OutputImageCrop(OutputImageCropAspectRatio.OutputImageCropAspectRatio2x3, size);
var outputImageParams = new OutputImageParams(outputImageCrop, 0xffffff);
configuration.OutputImageParams = outputImageParams;

// Xamarin (iOS)
var configuration = new RFSDetectFacesConfiguration();
var size = new Size { Width = 500, Height = 750 };
var outputImageCrop = new RFSOutputImageCrop(RFSOutputImageCropAspectRatio.RFSOutputImageCropAspectRatio2x3, size);
var outputImageParams = new RFSOutputImageParams { Crop = outputImageCrop, BackgroundColor = UIColor.White };
configuration.OutputImageParams = outputImageParams;
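
These settings surface in each detection result: the processed image is returned in the crop field and, when returnOriginalRect is enabled, the face coordinates within the source image are returned in originalRect (both fields appear in the DetectFaceResult section below). A short Swift sketch for a given detectFaceResult, under those assumptions:

// The crop honors the configured 2:3 aspect ratio and 500x750 size.
let croppedFace = detectFaceResult.crop
// With returnOriginalRect = true, the rectangle refers to the source image.
let sourceRect = detectFaceResult.originalRect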

Response

DetectFacesResponse contains the results array; see its structure.

// Swift
let scenario = detectFacesResponse.scenario
let error = detectFacesResponse.error
let allDetections = detectFacesResponse.allDetections
let detection = detectFacesResponse.detection

// Objective-C
NSString *scenario = detectFacesResponse.scenario;
NSError *error = detectFacesResponse.error;
NSArray<RFSDetectFaceResult *> *detectFaceResults = detectFacesResponse.allDetections;
RFSDetectFaceResult *detection = detectFacesResponse.detection;

// Kotlin
val scenario = detectFacesResponse.scenario
val error = detectFacesResponse.error
val allDetections = detectFacesResponse.allDetections
val detection = detectFacesResponse.detection

// Java
String scenario = detectFacesResponse.getScenario();
DetectFacesErrorException error = detectFacesResponse.getError();
List<DetectFaceResult> detectFaceResults = detectFacesResponse.getAllDetections();
DetectFaceResult detection = detectFacesResponse.getDetection();

// React Native
let scenario = detectFacesResponse.scenario
let error = detectFacesResponse.error
let detectFaceResults = detectFacesResponse.allDetections
let detection = detectFacesResponse.detection

// Flutter
var scenario = detectFacesResponse?.scenario;
var error = detectFacesResponse?.error;
var detectFaceResults = detectFacesResponse?.allDetections;
var detection = detectFacesResponse?.detection;

// Cordova
scenario = detectFacesResponse.scenario
error = detectFacesResponse.error
detectFaceResults = detectFacesResponse.allDetections
detection = detectFacesResponse.detection

// Ionic
let scenario = detectFacesResponse.scenario
let error = detectFacesResponse.error
let detectFaceResults = detectFacesResponse.allDetections
let detection = detectFacesResponse.detection

// Xamarin (Android)
var scenario = detectFacesResponse.Scenario;
var error = detectFacesResponse.Error;
var detectFaceResults = detectFacesResponse.AllDetections;
var detection = detectFacesResponse.Detection;

// Xamarin (iOS)
var scenario = detectFacesResponse.Scenario;
var error = detectFacesResponse.Error;
var detectFaceResults = detectFacesResponse.AllDetections;
var detection = detectFacesResponse.Detection;
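
Before reading the results, it is sensible to check the response's error field first. A minimal Swift sketch, reusing the detectFacesRequest from the Request section above:

FaceSDK.service.detectFaces(by: detectFacesRequest) { detectFacesResponse in
    if let error = detectFacesResponse.error {
        // Detection failed; inspect the error instead of reading the results.
        print("Face detection error: \(error.localizedDescription)")
        return
    }
    // Safe to use detectFacesResponse.detection / .allDetections here.
}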

DetectFaceResult

Each detection in the response is a DetectFaceResult with the following fields:

// Swift
for detectFaceResult in detectFacesResponse.allDetections! {
  let qualityCompliant = detectFaceResult.isQualityCompliant
  let faceRect = detectFaceResult.faceRect
  let landMarks = detectFaceResult.landmarks
  let cropImage = detectFaceResult.crop
  let originalRect = detectFaceResult.originalRect
  let attributes = detectFaceResult.attributes
  let quality = detectFaceResult.quality
}

// Objective-C
for (RFSDetectFaceResult *detectFaceResult in detectFacesResponse.allDetections) {
  BOOL qualityCompliant = detectFaceResult.isQualityCompliant;
  CGRect faceRect = detectFaceResult.faceRect;
  NSArray<RFSPoint *> *landmarks = detectFaceResult.landmarks;
  UIImage *cropImage = detectFaceResult.crop;
  CGRect originalRect = detectFaceResult.originalRect;
  NSArray<RFSDetectFacesAttributeResult *> *attributes = detectFaceResult.attributes;
  NSArray<RFSImageQualityResult *> *quality = detectFaceResult.quality;
}

// Kotlin
for (detectFaceResult in detectFacesResponse.allDetections!!) {
    val qualityCompliant = detectFaceResult.isQualityCompliant
    val faceRect = detectFaceResult.faceRect
    val landMarks = detectFaceResult.landMarks
    val cropImage = detectFaceResult.cropImage
    val originalRect = detectFaceResult.originalRect
    val attributes = detectFaceResult.attributes
    val quality = detectFaceResult.quality
}

// Java
for (DetectFaceResult detectFaceResult : detectFacesResponse.getAllDetections()) {
    boolean qualityCompliant = detectFaceResult.isQualityCompliant();
    Rect faceRect = detectFaceResult.getFaceRect();
    List<Point> landMarks = detectFaceResult.getLandMarks();
    String cropImage = detectFaceResult.getCropImage();
    Rect originalRect = detectFaceResult.getOriginalRect();
    List<DetectFacesAttributeResult> attributes = detectFaceResult.getAttributes();
    List<ImageQualityResult> quality = detectFaceResult.getQuality();
}

// React Native
detectFacesResponse.allDetections.forEach(detectFaceResult => {
  let qualityCompliant = detectFaceResult.isQualityCompliant
  let faceRect = detectFaceResult.faceRect
  let landMarks = detectFaceResult.landmarks
  let cropImage = detectFaceResult.crop
  let originalRect = detectFaceResult.originalRect
  let attributes = detectFaceResult.attributes
  let quality = detectFaceResult.quality
})

// Flutter
detectFacesResponse?.allDetections.forEach((detectFaceResult) {
  var qualityCompliant = detectFaceResult?.isQualityCompliant;
  var faceRect = detectFaceResult?.faceRect;
  var landMarks = detectFaceResult?.landmarks;
  var cropImage = detectFaceResult?.crop;
  var originalRect = detectFaceResult?.originalRect;
  var attributes = detectFaceResult?.attributes;
  var quality = detectFaceResult?.quality;
});

// Cordova
detectFacesResponse.allDetections.forEach(detectFaceResult => {
  qualityCompliant = detectFaceResult.isQualityCompliant
  faceRect = detectFaceResult.faceRect
  landMarks = detectFaceResult.landmarks
  cropImage = detectFaceResult.crop
  originalRect = detectFaceResult.originalRect
  attributes = detectFaceResult.attributes
  quality = detectFaceResult.quality
})

// Ionic
detectFacesResponse.allDetections.forEach(detectFaceResult => {
  let qualityCompliant = detectFaceResult.isQualityCompliant
  let faceRect = detectFaceResult.faceRect
  let landMarks = detectFaceResult.landmarks
  let cropImage = detectFaceResult.crop
  let originalRect = detectFaceResult.originalRect
  let attributes = detectFaceResult.attributes
  let quality = detectFaceResult.quality
})

// Xamarin (Android)
foreach (var detectFaceResult in detectFacesResponse.AllDetections)
{
    var qualityCompliant = detectFaceResult.IsQualityCompliant;
    var faceRect = detectFaceResult.FaceRect;
    var landMarks = detectFaceResult.LandMarks;
    var cropImage = detectFaceResult.CropImage;
    var originalRect = detectFaceResult.OriginalRect;
    var attributes = detectFaceResult.Attributes;
    var quality = detectFaceResult.Quality;
}

// Xamarin (iOS)
foreach (var detectFaceResult in detectFacesResponse.AllDetections)
{
    var qualityCompliant = detectFaceResult.IsQualityCompliant;
    var faceRect = detectFaceResult.FaceRect;
    var landMarks = detectFaceResult.Landmarks;
    var cropImage = detectFaceResult.Crop;
    var originalRect = detectFaceResult.OriginalRect;
    var attributes = detectFaceResult.Attributes;
    var quality = detectFaceResult.Quality;
}
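
For instance, a typical flow keeps only quality-compliant detections and displays the processed crop. A brief Swift sketch; imageView here is a hypothetical UIImageView used purely for illustration:

if let detection = detectFacesResponse.detection,
   detection.isQualityCompliant {
    // Show the crop produced according to outputImageParams.
    imageView.image = detection.crop
}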