"resourceRoots": {
"openui5.camera": "./openui5/camera/"
}
sap.ui.define([],
function() {
    "use strict";
    /**
     * Renderer for the openui5.camera.Camera control.
     * @namespace openui5.camera
     */
    var CameraRenderer = {};

    /**
     * Renders the HTML for the control, using the provided {@link sap.ui.core.RenderManager}.
     *
     * Writes a wrapper <div> containing:
     *  - a <video> element that displays the camera stream,
     *  - an overlay <canvas> used to draw face-detection boxes and landmarks,
     *  - a hidden <canvas> ("temp") used to capture the detected face for recognition.
     * Each element id is suffixed with the part of the control id that follows
     * "camera", so several camera controls can coexist on one page.
     *
     * @param {sap.ui.core.RenderManager} oRm RenderManager object
     * @param {sap.ui.core.Control} oControl An object representation of the control that will be rendered
     */
    CameraRenderer.render = function(oRm, oControl) {
        var arr = oControl.getId().split("camera");
        // BUG FIX: String#split always yields at least one element, so the
        // original "arr.length > 0" check was always true; require a real
        // suffix so arr[1] is defined.
        if (arr.length > 1) {
            // Substitute the size placeholders in a template string.
            var fnFill = function(sTemplate) {
                return sTemplate
                    .replace("%w", oControl.getVideoWidth())
                    .replace("%h", oControl.getVideoHeight())
                    .replace("%pw", oControl.getWidth())
                    .replace("%ph", oControl.getHeight());
            };
            oRm.write("<div class='blockcam'");
            oRm.writeControlData(oControl);
            oRm.writeClasses();
            oRm.writeStyles();
            oRm.write(">");
            oRm.write("<div style='position: relative; flex-direction: row; align-items: center; justify-content: space-around;'>");
            // Video of the Cam
            oRm.write(fnFill(
                "<video id='video_control" + arr[1] + "' width='%w' height='%h' style='width: %pwpx; height: %phpx; position: absolute;' onplay='onplay'></video>"));
            // Draw the Rectangle around the face on Camera Control
            oRm.write(fnFill(
                "<canvas id='overlay" + arr[1] + "' width='%w' height='%h' style=' width: %pwpx; height: %phpx; position: absolute;'></canvas>"));
            // Responsible for saving the detected face for the recognition process.
            // BUG FIX: the original wrote the %w/%h/%pw/%ph placeholders
            // verbatim into the DOM because the .replace() calls were missing
            // for this third template.
            oRm.write(fnFill(
                "<canvas id='temp" + arr[1] + "' width='%w' height='%h' hidden='true' style=' width: %pwpx; height: %phpx; position: absolute;'></canvas>"));
            oRm.write("</div>");
            oRm.write("</div>");
        }
    };

    return CameraRenderer;
}, /* bExport= */ true);
// NOTE(review): this snippet assumes "Control" (sap/ui/core/Control) is in
// scope from an enclosing sap.ui.define dependency list that is not shown
// here — confirm against the full module. The extend() call continues past
// this excerpt with the lifecycle methods below.
var oCamera = Control.extend("openui5.camera.Camera", {
/**
 * Control API
 */
metadata: {
properties: {
/**
 * Width of the preview window in pixels
 */
"width": {
type: "string",
defaultValue: "448"
},
/**
 * Height of the preview window in pixels
 */
"height": {
type: "string",
defaultValue: "300"
},
/**
 * Width of the video capture window in pixels
 */
"videoWidth": {
type: "string",
defaultValue: "448"
},
/**
 * Height of the video capture window in pixels
 */
"videoHeight": {
type: "string",
defaultValue: "300"
},
/**
 * Camera device ID of a camera attached to the PC; presumably the
 * deviceId reported by navigator.mediaDevices.enumerateDevices()
 * (set from it in onInit below). No default value.
 */
"deviceid": {
type: "string"
}
},
events: {
/**
 * Face detection event.
 * Fired once the camera video stream starts playing, to kick off
 * the face-detection process.
 */
"facedetect": {}
}
},
onAfterRendering: function(Oevent) {
var that = this;
var oVideo = this._getVideo();
if (oVideo && !this._displayingVideo) {
// set the camera stream on the canvas.
// Ask the user for camera access.
navigator.mediaDevices.getUserMedia({
video: {
facingMode: "environment",
width: 448,
height: 300,
deviceId: { exact: that.getDeviceid()}
}, // Back camera
audio: false
})
.then(function(stream) {
// We have a camera. Let's store the stream for later use
that._stream = stream;
oVideo.srcObject = stream;
oVideo.play();
that._displayingVideo = true;
$('video').bind('play', function(e) {
// Fire Face Detection Event.
that.fireFacedetect({});
});
})
.catch(function(err) {
jQuery.sap.log.error("Problems accessing the camera: " + err);
});
}
},
onInit: function(oControl) {
// that refer to this.
that = this;
// Json Model refer to Faces Detected
var facemodel = new JSONModel({
"FacesSet": []
});
var list = that.byId("facelist");
list.setModel(facemodel, "faces");
// Cameras Array
camss = [];
// Get All Camss that connected to your PC
navigator.mediaDevices.enumerateDevices().then(function(cameras) {
var index = 1;
for (var i in cameras) {
if (cameras[i].kind === "videoinput") {
camss.push({
"deviceid": cameras[i].deviceId,
"label": cameras[i].label
});
//==================================
// Add Custom Camera Control for each Cam detected
// camss_contents is Grid Control
that.byId("camss_contents").addContent(
new Camera({
id: "camera" + index,
width: "auto",
height: "auto",
deviceid: cameras[i].deviceId,
label: cameras[i].label,
class: "camera",
facedetect: function(oEvent) {
that.onFacedetect(this);
}
})
);
index++;
}
}
});
},
sap.ui.define([
"sap/ui/core/UIComponent",
"sap/ui/Device",
"faceZfacedetection/model/models",
"faceZfacedetection/LibFace/face-api"
], function(UIComponent, Device, models, faceapi ) {
onFacedetect: function(Control) {
// Load Model Face Recognition.
faceapi.loadFaceRecognitionModel('models/').then(function() {
// Load Model Face Detection.
faceapi.loadMtcnnModel('models/').then(function() {
// Draw the Face Detection Border in Canvas of Camera
that.calldrawing(Control);
});
});
},
calldrawing: function(Control) {
var arr = [];
arr = Control.getId().split("camera");
if (arr.length > 0) {
// Overlay is canvas that we added to the custom Camera control
var canvas = document.getElementById("overlay" + arr[1]);
var context = canvas.getContext('2d');
context.clearRect(0, 0, canvas.width, canvas.height);
var minConfidence = 0.8;
var maxResults = 10;
// Video Control is the Video tag for the Camera
var myImg = document.getElementById("video_control" + arr[1]);
var forwardParams = {
scaleSteps: [0.4, 0.2, 0.1, 0.05]
};
// Detect All Faces in the Camera
var results = faceapi.mtcnn(myImg, forwardParams);
var minConfidence = 0.9;
// defs is Array for all faces in video
results.then(function(defs) {
for (var i = 0; i < defs.length; i++) {
var faceDetection = defs[i].faceDetection;
var faceLandmarks = defs[i].faceLandmarks;
// ignore results with low confidence score
if (faceDetection.score < minConfidence) {
return;
}
// Draw the Faces in Canvas
faceapi.drawDetection('overlay' + arr[1], faceDetection);
faceapi.drawLandmarks('overlay' + arr[1], faceLandmarks);
var oCanvas = document.getElementById('temp' + arr[1]);
var context = oCanvas.getContext('2d');
var oVideo = document.getElementById('video_control' + arr[1]);
context.clearRect(0, 0, oCanvas.width, oCanvas.height);
oCanvas.width = faceDetection._box._width;
oCanvas.height = faceDetection._box._height;
context.drawImage(oVideo, faceDetection._box.x, faceDetection._box.y, faceDetection._box._width, faceDetection._box._height, 0,
0, faceDetection._box._width, faceDetection._box._height);
// Convert the Face Detected to GrayScale
that.makeGrayscale(oCanvas);
// display the face that captured
that.byId('image2').setSrc(oCanvas.toDataURL());
//add to face list
var list = that.byId("facelist");
var facemodel = list.getModel("faces");
}
//=========================================================================================
//================Compute the Distance between the two faces ==============================
//=========================================================================================
var result1;
// Getting the landmarks for each Image
// My Face
var descriptor1 = faceapi.computeFaceDescriptor(that.byId("image1").getId());
// Saved Detected Face
var descriptor2 = faceapi.computeFaceDescriptor('temp' + arr[1]);
descriptor1.then(function(x) {
result1 = x;
descriptor2.then(function(result2) {
// Recognition Function Core
var distance = faceapi.euclideanDistance(result1, result2);
if (facemodel) {
// Distance Less than 0.39 mean not matched
if (distance <= 0.39) {
// Push Face Detected with Success Matching to Face JSON Model
facemodel.getData().FacesSet.unshift({
"type": "Active",
"datenow": new Date(),
"description": Control.getId(),
"info": "Matched" + distance.toFixed(2),
"infoState": sap.ui.core.MessageType.Success,
"icon": oCanvas.toDataURL(),
"highlight": sap.ui.core.MessageType.Success
// "infoState": ""
});
} else {
// Push Face Detected with Failed Matching to Face JSON Model
facemodel.getData().FacesSet.unshift({
"type": "Active",
"datenow": new Date(),
"description": Control.getId(),
"info": "Unknown" + distance.toFixed(2),
"infoState": sap.ui.core.ValueState.Error,
"icon": oCanvas.toDataURL(),
"highlight": sap.ui.core.MessageType.Error
});
}
facemodel.refresh();
}
});
});
setTimeout(that.calldrawing, 1000, Control);
});
}
},
// Convert Face image to GrayScale
makeGrayscale: function(input) {
//Get the context for the loaded image
var inputContext = input.getContext("2d");
//get the image data;
var imageData = inputContext.getImageData(0, 0, input.width, input.height);
//Get the CanvasPixelArray
var data = imageData.data;
//Get length of all pixels in image each pixel made up of 4 elements for each pixel, one for Red, Green, Blue and Alpha
var arraylength = input.width * input.height * 4;
//Go through each pixel from bottom right to top left and alter to its gray equiv
//Common formula for converting to grayscale.
//gray = 0.3*R + 0.59*G + 0.11*B
for (var i = arraylength - 1; i > 0; i -= 4) {
//R= i-3, G = i-2 and B = i-1
//Get our gray shade using the formula
var gray = 0.3 * data[i - 3] + 0.59 * data[i - 2] + 0.11 * data[i - 1];
//Set our 3 RGB channels to the computed gray.
data[i - 3] = gray;
data[i - 2] = gray;
data[i - 1] = gray;
}
inputContext.putImageData(imageData, 0, 0);
},
You must be a registered user to add a comment. If you've already registered, sign in. Otherwise, register and sign in.
| User | Count |
|---|---|
| 6 | |
| 6 | |
| 5 | |
| 4 | |
| 4 | |
| 3 | |
| 3 | |
| 3 | |
| 3 | |
| 3 | |